Diffstat (limited to 'arch/um/kernel')
-rw-r--r--  arch/um/kernel/dyn.lds.S                |   6
-rw-r--r--  arch/um/kernel/physmem.c                |   8
-rw-r--r--  arch/um/kernel/process.c                |  28
-rw-r--r--  arch/um/kernel/skas/exec_kern.c         |   2
-rw-r--r--  arch/um/kernel/skas/include/mm_id.h     |  17
-rw-r--r--  arch/um/kernel/skas/include/mmu-skas.h  |   7
-rw-r--r--  arch/um/kernel/skas/include/skas.h      |  15
-rw-r--r--  arch/um/kernel/skas/mem.c               |   6
-rw-r--r--  arch/um/kernel/skas/mem_user.c          | 225
-rw-r--r--  arch/um/kernel/skas/mmu.c               | 136
-rw-r--r--  arch/um/kernel/skas/process.c           | 153
-rw-r--r--  arch/um/kernel/skas/process_kern.c      |  33
-rw-r--r--  arch/um/kernel/skas/tlb.c               |  28
-rw-r--r--  arch/um/kernel/tlb.c                    | 132
-rw-r--r--  arch/um/kernel/tt/tlb.c                 |   4
-rw-r--r--  arch/um/kernel/uml.lds.S                |   7
16 files changed, 577 insertions, 230 deletions
diff --git a/arch/um/kernel/dyn.lds.S b/arch/um/kernel/dyn.lds.S
index 715b0838a68c..3942a5f245de 100644
--- a/arch/um/kernel/dyn.lds.S
+++ b/arch/um/kernel/dyn.lds.S
@@ -67,6 +67,12 @@ SECTIONS | |||
67 | *(.stub .text.* .gnu.linkonce.t.*) | 67 | *(.stub .text.* .gnu.linkonce.t.*) |
68 | /* .gnu.warning sections are handled specially by elf32.em. */ | 68 | /* .gnu.warning sections are handled specially by elf32.em. */ |
69 | *(.gnu.warning) | 69 | *(.gnu.warning) |
70 | |||
71 | . = ALIGN(4096); | ||
72 | __syscall_stub_start = .; | ||
73 | *(.__syscall_stub*) | ||
74 | __syscall_stub_end = .; | ||
75 | . = ALIGN(4096); | ||
70 | } =0x90909090 | 76 | } =0x90909090 |
71 | .fini : { | 77 | .fini : { |
72 | KEEP (*(.fini)) | 78 | KEEP (*(.fini)) |
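
Note: the dyn.lds.S hunk above collects anything placed in a .__syscall_stub* section into its own page-aligned region bracketed by __syscall_stub_start and __syscall_stub_end (the uml.lds.S hunk at the end of this diff does the same for the static link). A minimal sketch of how code ends up in such a section, assuming GCC's section attribute; the function here is a hypothetical placeholder, not UML's real stub:

    /* Sketch: place code in the .__syscall_stub section so the linker
     * script above gathers it between __syscall_stub_start and
     * __syscall_stub_end.  Hypothetical example, not the real stub. */
    static void __attribute__((__section__(".__syscall_stub")))
    example_stub(void)
    {
            /* code that must live in its own page-aligned region */
    }
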
diff --git a/arch/um/kernel/physmem.c b/arch/um/kernel/physmem.c
index 420e6d51fa0f..a24e3b7f4bf0 100644
--- a/arch/um/kernel/physmem.c
+++ b/arch/um/kernel/physmem.c
@@ -353,6 +353,8 @@ void map_memory(unsigned long virt, unsigned long phys, unsigned long len, | |||
353 | 353 | ||
354 | #define PFN_UP(x) (((x) + PAGE_SIZE-1) >> PAGE_SHIFT) | 354 | #define PFN_UP(x) (((x) + PAGE_SIZE-1) >> PAGE_SHIFT) |
355 | 355 | ||
356 | extern int __syscall_stub_start, __binary_start; | ||
357 | |||
356 | void setup_physmem(unsigned long start, unsigned long reserve_end, | 358 | void setup_physmem(unsigned long start, unsigned long reserve_end, |
357 | unsigned long len, unsigned long highmem) | 359 | unsigned long len, unsigned long highmem) |
358 | { | 360 | { |
@@ -371,6 +373,12 @@ void setup_physmem(unsigned long start, unsigned long reserve_end, | |||
371 | exit(1); | 373 | exit(1); |
372 | } | 374 | } |
373 | 375 | ||
376 | /* Special kludge - This page will be mapped in to userspace processes | ||
377 | * from physmem_fd, so it needs to be written out there. | ||
378 | */ | ||
379 | os_seek_file(physmem_fd, __pa(&__syscall_stub_start)); | ||
380 | os_write_file(physmem_fd, &__syscall_stub_start, PAGE_SIZE); | ||
381 | |||
374 | bootmap_size = init_bootmem(pfn, pfn + delta); | 382 | bootmap_size = init_bootmem(pfn, pfn + delta); |
375 | free_bootmem(__pa(reserve_end) + bootmap_size, | 383 | free_bootmem(__pa(reserve_end) + bootmap_size, |
376 | len - bootmap_size - reserve); | 384 | len - bootmap_size - reserve); |
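
Note: the "special kludge" above seeks physmem_fd to the physical offset of the stub-code page and writes the page there, so that later file-backed mappings of that offset in child processes see the stub's contents. A rough user-space analogue of the idea, assuming plain POSIX calls rather than UML's os_seek_file()/os_write_file() wrappers; the names backing_fd, page and off are illustrative:

    /* Sketch: publish a page into a backing file at a given offset so a
     * later MAP_SHARED mmap() of that offset observes the same bytes.
     * Plain POSIX; names are illustrative. */
    #include <sys/types.h>
    #include <unistd.h>

    static int publish_page(int backing_fd, const void *page, off_t off,
                            size_t page_size)
    {
            if (lseek(backing_fd, off, SEEK_SET) < 0)
                    return -1;
            if (write(backing_fd, page, page_size) != (ssize_t) page_size)
                    return -1;
            return 0;
    }
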
diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
index 1b5ef3e96c71..c45a60e9c92d 100644
--- a/arch/um/kernel/process.c
+++ b/arch/um/kernel/process.c
@@ -32,6 +32,7 @@ | |||
32 | #include "uml-config.h" | 32 | #include "uml-config.h" |
33 | #include "choose-mode.h" | 33 | #include "choose-mode.h" |
34 | #include "mode.h" | 34 | #include "mode.h" |
35 | #include "tempfile.h" | ||
35 | #ifdef UML_CONFIG_MODE_SKAS | 36 | #ifdef UML_CONFIG_MODE_SKAS |
36 | #include "skas.h" | 37 | #include "skas.h" |
37 | #include "skas_ptrace.h" | 38 | #include "skas_ptrace.h" |
@@ -358,11 +359,16 @@ void forward_pending_sigio(int target) | |||
358 | kill(target, SIGIO); | 359 | kill(target, SIGIO); |
359 | } | 360 | } |
360 | 361 | ||
362 | int ptrace_faultinfo = 0; | ||
363 | int proc_mm = 1; | ||
364 | |||
365 | extern void *__syscall_stub_start, __syscall_stub_end; | ||
366 | |||
361 | #ifdef UML_CONFIG_MODE_SKAS | 367 | #ifdef UML_CONFIG_MODE_SKAS |
362 | static inline int check_skas3_ptrace_support(void) | 368 | static inline void check_skas3_ptrace_support(void) |
363 | { | 369 | { |
364 | struct ptrace_faultinfo fi; | 370 | struct ptrace_faultinfo fi; |
365 | int pid, n, ret = 1; | 371 | int pid, n; |
366 | 372 | ||
367 | printf("Checking for the skas3 patch in the host..."); | 373 | printf("Checking for the skas3 patch in the host..."); |
368 | pid = start_ptraced_child(); | 374 | pid = start_ptraced_child(); |
@@ -374,33 +380,31 @@ static inline int check_skas3_ptrace_support(void) | |||
374 | else { | 380 | else { |
375 | perror("not found"); | 381 | perror("not found"); |
376 | } | 382 | } |
377 | ret = 0; | 383 | } |
378 | } else { | 384 | else { |
385 | ptrace_faultinfo = 1; | ||
379 | printf("found\n"); | 386 | printf("found\n"); |
380 | } | 387 | } |
381 | 388 | ||
382 | init_registers(pid); | 389 | init_registers(pid); |
383 | stop_ptraced_child(pid, 1, 1); | 390 | stop_ptraced_child(pid, 1, 1); |
384 | |||
385 | return(ret); | ||
386 | } | 391 | } |
387 | 392 | ||
388 | int can_do_skas(void) | 393 | int can_do_skas(void) |
389 | { | 394 | { |
390 | int ret = 1; | ||
391 | |||
392 | printf("Checking for /proc/mm..."); | 395 | printf("Checking for /proc/mm..."); |
393 | if (os_access("/proc/mm", OS_ACC_W_OK) < 0) { | 396 | if (os_access("/proc/mm", OS_ACC_W_OK) < 0) { |
397 | proc_mm = 0; | ||
394 | printf("not found\n"); | 398 | printf("not found\n"); |
395 | ret = 0; | ||
396 | goto out; | 399 | goto out; |
397 | } else { | 400 | } |
401 | else { | ||
398 | printf("found\n"); | 402 | printf("found\n"); |
399 | } | 403 | } |
400 | 404 | ||
401 | ret = check_skas3_ptrace_support(); | ||
402 | out: | 405 | out: |
403 | return ret; | 406 | check_skas3_ptrace_support(); |
407 | return 1; | ||
404 | } | 408 | } |
405 | #else | 409 | #else |
406 | int can_do_skas(void) | 410 | int can_do_skas(void) |
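
Note: with this change can_do_skas() no longer returns a capability code; it always returns 1 and instead sets the new proc_mm and ptrace_faultinfo globals from two host probes (writable /proc/mm, and PTRACE_FAULTINFO on a traced child). A tiny sketch of the /proc/mm probe, assuming plain access(2) in place of UML's os_access()/OS_ACC_W_OK wrapper:

    /* Sketch: host-feature probe in the spirit of can_do_skas(),
     * using access(2) directly instead of UML's os_access(). */
    #include <stdio.h>
    #include <unistd.h>

    static int probe_proc_mm(void)
    {
            if (access("/proc/mm", W_OK) < 0) {
                    printf("Checking for /proc/mm...not found\n");
                    return 0;       /* host lacks /proc/mm; use stub pages */
            }
            printf("Checking for /proc/mm...found\n");
            return 1;
    }
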
diff --git a/arch/um/kernel/skas/exec_kern.c b/arch/um/kernel/skas/exec_kern.c
index c6b4d5dba789..77ed7bbab219 100644
--- a/arch/um/kernel/skas/exec_kern.c
+++ b/arch/um/kernel/skas/exec_kern.c
@@ -18,7 +18,7 @@ | |||
18 | void flush_thread_skas(void) | 18 | void flush_thread_skas(void) |
19 | { | 19 | { |
20 | force_flush_all(); | 20 | force_flush_all(); |
21 | switch_mm_skas(current->mm->context.skas.mm_fd); | 21 | switch_mm_skas(¤t->mm->context.skas.id); |
22 | } | 22 | } |
23 | 23 | ||
24 | void start_thread_skas(struct pt_regs *regs, unsigned long eip, | 24 | void start_thread_skas(struct pt_regs *regs, unsigned long eip, |
diff --git a/arch/um/kernel/skas/include/mm_id.h b/arch/um/kernel/skas/include/mm_id.h
new file mode 100644
index 000000000000..48dd0989ddaa
--- /dev/null
+++ b/arch/um/kernel/skas/include/mm_id.h
@@ -0,0 +1,17 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2005 Jeff Dike (jdike@karaya.com) | ||
3 | * Licensed under the GPL | ||
4 | */ | ||
5 | |||
6 | #ifndef __MM_ID_H | ||
7 | #define __MM_ID_H | ||
8 | |||
9 | struct mm_id { | ||
10 | union { | ||
11 | int mm_fd; | ||
12 | int pid; | ||
13 | } u; | ||
14 | unsigned long stack; | ||
15 | }; | ||
16 | |||
17 | #endif | ||
diff --git a/arch/um/kernel/skas/include/mmu-skas.h b/arch/um/kernel/skas/include/mmu-skas.h
index 4cd60d7213f3..278b72f1d9ad 100644
--- a/arch/um/kernel/skas/include/mmu-skas.h
+++ b/arch/um/kernel/skas/include/mmu-skas.h
@@ -6,10 +6,15 @@ | |||
6 | #ifndef __SKAS_MMU_H | 6 | #ifndef __SKAS_MMU_H |
7 | #define __SKAS_MMU_H | 7 | #define __SKAS_MMU_H |
8 | 8 | ||
9 | #include "mm_id.h" | ||
10 | |||
9 | struct mmu_context_skas { | 11 | struct mmu_context_skas { |
10 | int mm_fd; | 12 | struct mm_id id; |
13 | unsigned long last_page_table; | ||
11 | }; | 14 | }; |
12 | 15 | ||
16 | extern void switch_mm_skas(struct mm_id * mm_idp); | ||
17 | |||
13 | #endif | 18 | #endif |
14 | 19 | ||
15 | /* | 20 | /* |
diff --git a/arch/um/kernel/skas/include/skas.h b/arch/um/kernel/skas/include/skas.h
index 96b51dba3471..d91a60f3830a 100644
--- a/arch/um/kernel/skas/include/skas.h
+++ b/arch/um/kernel/skas/include/skas.h
@@ -6,9 +6,11 @@ | |||
6 | #ifndef __SKAS_H | 6 | #ifndef __SKAS_H |
7 | #define __SKAS_H | 7 | #define __SKAS_H |
8 | 8 | ||
9 | #include "mm_id.h" | ||
9 | #include "sysdep/ptrace.h" | 10 | #include "sysdep/ptrace.h" |
10 | 11 | ||
11 | extern int userspace_pid[]; | 12 | extern int userspace_pid[]; |
13 | extern int proc_mm, ptrace_faultinfo; | ||
12 | 14 | ||
13 | extern void switch_threads(void *me, void *next); | 15 | extern void switch_threads(void *me, void *next); |
14 | extern void thread_wait(void *sw, void *fb); | 16 | extern void thread_wait(void *sw, void *fb); |
@@ -22,16 +24,17 @@ extern void new_thread_proc(void *stack, void (*handler)(int sig)); | |||
22 | extern void remove_sigstack(void); | 24 | extern void remove_sigstack(void); |
23 | extern void new_thread_handler(int sig); | 25 | extern void new_thread_handler(int sig); |
24 | extern void handle_syscall(union uml_pt_regs *regs); | 26 | extern void handle_syscall(union uml_pt_regs *regs); |
25 | extern void map(int fd, unsigned long virt, unsigned long len, int r, int w, | 27 | extern int map(struct mm_id * mm_idp, unsigned long virt, unsigned long len, |
26 | int x, int phys_fd, unsigned long long offset); | 28 | int r, int w, int x, int phys_fd, unsigned long long offset); |
27 | extern int unmap(int fd, void *addr, unsigned long len); | 29 | extern int unmap(struct mm_id * mm_idp, void *addr, unsigned long len); |
28 | extern int protect(int fd, unsigned long addr, unsigned long len, | 30 | extern int protect(struct mm_id * mm_idp, unsigned long addr, |
29 | int r, int w, int x); | 31 | unsigned long len, int r, int w, int x); |
30 | extern void user_signal(int sig, union uml_pt_regs *regs, int pid); | 32 | extern void user_signal(int sig, union uml_pt_regs *regs, int pid); |
31 | extern int new_mm(int from); | 33 | extern int new_mm(int from); |
32 | extern void start_userspace(int cpu); | 34 | extern int start_userspace(unsigned long stub_stack); |
33 | extern void get_skas_faultinfo(int pid, struct faultinfo * fi); | 35 | extern void get_skas_faultinfo(int pid, struct faultinfo * fi); |
34 | extern long execute_syscall_skas(void *r); | 36 | extern long execute_syscall_skas(void *r); |
37 | extern unsigned long current_stub_stack(void); | ||
35 | 38 | ||
36 | #endif | 39 | #endif |
37 | 40 | ||
diff --git a/arch/um/kernel/skas/mem.c b/arch/um/kernel/skas/mem.c
index 438db2f43456..147466d7ff4f 100644
--- a/arch/um/kernel/skas/mem.c
+++ b/arch/um/kernel/skas/mem.c
@@ -5,7 +5,9 @@ | |||
5 | 5 | ||
6 | #include "linux/config.h" | 6 | #include "linux/config.h" |
7 | #include "linux/mm.h" | 7 | #include "linux/mm.h" |
8 | #include "asm/pgtable.h" | ||
8 | #include "mem_user.h" | 9 | #include "mem_user.h" |
10 | #include "skas.h" | ||
9 | 11 | ||
10 | unsigned long set_task_sizes_skas(int arg, unsigned long *host_size_out, | 12 | unsigned long set_task_sizes_skas(int arg, unsigned long *host_size_out, |
11 | unsigned long *task_size_out) | 13 | unsigned long *task_size_out) |
@@ -18,7 +20,9 @@ unsigned long set_task_sizes_skas(int arg, unsigned long *host_size_out, | |||
18 | *task_size_out = CONFIG_HOST_TASK_SIZE; | 20 | *task_size_out = CONFIG_HOST_TASK_SIZE; |
19 | #else | 21 | #else |
20 | *host_size_out = top; | 22 | *host_size_out = top; |
21 | *task_size_out = top; | 23 | if (proc_mm && ptrace_faultinfo) |
24 | *task_size_out = top; | ||
25 | else *task_size_out = CONFIG_STUB_START & PGDIR_MASK; | ||
22 | #endif | 26 | #endif |
23 | return(((unsigned long) set_task_sizes_skas) & ~0xffffff); | 27 | return(((unsigned long) set_task_sizes_skas) & ~0xffffff); |
24 | } | 28 | } |
diff --git a/arch/um/kernel/skas/mem_user.c b/arch/um/kernel/skas/mem_user.c
index 1310bf1e88d1..b0980ff3bd95 100644
--- a/arch/um/kernel/skas/mem_user.c
+++ b/arch/um/kernel/skas/mem_user.c
@@ -3,100 +3,171 @@ | |||
3 | * Licensed under the GPL | 3 | * Licensed under the GPL |
4 | */ | 4 | */ |
5 | 5 | ||
6 | #include <signal.h> | ||
6 | #include <errno.h> | 7 | #include <errno.h> |
7 | #include <sys/mman.h> | 8 | #include <sys/mman.h> |
9 | #include <sys/wait.h> | ||
10 | #include <asm/page.h> | ||
11 | #include <asm/unistd.h> | ||
8 | #include "mem_user.h" | 12 | #include "mem_user.h" |
9 | #include "mem.h" | 13 | #include "mem.h" |
14 | #include "mm_id.h" | ||
10 | #include "user.h" | 15 | #include "user.h" |
11 | #include "os.h" | 16 | #include "os.h" |
12 | #include "proc_mm.h" | 17 | #include "proc_mm.h" |
13 | 18 | #include "ptrace_user.h" | |
14 | void map(int fd, unsigned long virt, unsigned long len, int r, int w, | 19 | #include "user_util.h" |
15 | int x, int phys_fd, unsigned long long offset) | 20 | #include "kern_util.h" |
21 | #include "task.h" | ||
22 | #include "registers.h" | ||
23 | #include "uml-config.h" | ||
24 | #include "sysdep/ptrace.h" | ||
25 | #include "sysdep/stub.h" | ||
26 | #include "skas.h" | ||
27 | |||
28 | extern unsigned long syscall_stub, __syscall_stub_start; | ||
29 | |||
30 | extern void wait_stub_done(int pid, int sig, char * fname); | ||
31 | |||
32 | static long run_syscall_stub(struct mm_id * mm_idp, int syscall, | ||
33 | unsigned long *args) | ||
16 | { | 34 | { |
17 | struct proc_mm_op map; | 35 | int n, pid = mm_idp->u.pid; |
18 | int prot, n; | 36 | unsigned long regs[MAX_REG_NR]; |
19 | 37 | ||
20 | prot = (r ? PROT_READ : 0) | (w ? PROT_WRITE : 0) | | 38 | get_safe_registers(regs); |
21 | (x ? PROT_EXEC : 0); | 39 | regs[REGS_IP_INDEX] = UML_CONFIG_STUB_CODE + |
22 | 40 | ((unsigned long) &syscall_stub - | |
23 | map = ((struct proc_mm_op) { .op = MM_MMAP, | 41 | (unsigned long) &__syscall_stub_start); |
24 | .u = | 42 | /* XXX Don't have a define for starting a syscall */ |
25 | { .mmap = | 43 | regs[REGS_SYSCALL_NR] = syscall; |
26 | { .addr = virt, | 44 | regs[REGS_SYSCALL_ARG1] = args[0]; |
27 | .len = len, | 45 | regs[REGS_SYSCALL_ARG2] = args[1]; |
28 | .prot = prot, | 46 | regs[REGS_SYSCALL_ARG3] = args[2]; |
29 | .flags = MAP_SHARED | | 47 | regs[REGS_SYSCALL_ARG4] = args[3]; |
30 | MAP_FIXED, | 48 | regs[REGS_SYSCALL_ARG5] = args[4]; |
31 | .fd = phys_fd, | 49 | regs[REGS_SYSCALL_ARG6] = args[5]; |
32 | .offset = offset | 50 | n = ptrace_setregs(pid, regs); |
33 | } } } ); | 51 | if(n < 0){ |
34 | n = os_write_file(fd, &map, sizeof(map)); | 52 | printk("run_syscall_stub : PTRACE_SETREGS failed, " |
35 | if(n != sizeof(map)) | 53 | "errno = %d\n", n); |
36 | printk("map : /proc/mm map failed, err = %d\n", -n); | 54 | return(n); |
55 | } | ||
56 | |||
57 | wait_stub_done(pid, 0, "run_syscall_stub"); | ||
58 | |||
59 | return(*((unsigned long *) mm_idp->stack)); | ||
37 | } | 60 | } |
38 | 61 | ||
39 | int unmap(int fd, void *addr, unsigned long len) | 62 | int map(struct mm_id *mm_idp, unsigned long virt, unsigned long len, |
63 | int r, int w, int x, int phys_fd, unsigned long long offset) | ||
40 | { | 64 | { |
41 | struct proc_mm_op unmap; | 65 | int prot, n; |
42 | int n; | 66 | |
43 | 67 | prot = (r ? PROT_READ : 0) | (w ? PROT_WRITE : 0) | | |
44 | unmap = ((struct proc_mm_op) { .op = MM_MUNMAP, | 68 | (x ? PROT_EXEC : 0); |
45 | .u = | 69 | |
46 | { .munmap = | 70 | if(proc_mm){ |
47 | { .addr = (unsigned long) addr, | 71 | struct proc_mm_op map; |
48 | .len = len } } } ); | 72 | int fd = mm_idp->u.mm_fd; |
49 | n = os_write_file(fd, &unmap, sizeof(unmap)); | 73 | map = ((struct proc_mm_op) { .op = MM_MMAP, |
50 | if(n != sizeof(unmap)) { | 74 | .u = |
51 | if(n < 0) | 75 | { .mmap = |
52 | return(n); | 76 | { .addr = virt, |
53 | else if(n > 0) | 77 | .len = len, |
54 | return(-EIO); | 78 | .prot = prot, |
55 | } | 79 | .flags = MAP_SHARED | |
56 | 80 | MAP_FIXED, | |
57 | return(0); | 81 | .fd = phys_fd, |
82 | .offset= offset | ||
83 | } } } ); | ||
84 | n = os_write_file(fd, &map, sizeof(map)); | ||
85 | if(n != sizeof(map)) | ||
86 | printk("map : /proc/mm map failed, err = %d\n", -n); | ||
87 | } | ||
88 | else { | ||
89 | long res; | ||
90 | unsigned long args[] = { virt, len, prot, | ||
91 | MAP_SHARED | MAP_FIXED, phys_fd, | ||
92 | MMAP_OFFSET(offset) }; | ||
93 | |||
94 | res = run_syscall_stub(mm_idp, STUB_MMAP_NR, args); | ||
95 | if((void *) res == MAP_FAILED) | ||
96 | printk("mmap stub failed, errno = %d\n", res); | ||
97 | } | ||
98 | |||
99 | return 0; | ||
58 | } | 100 | } |
59 | 101 | ||
60 | int protect(int fd, unsigned long addr, unsigned long len, int r, int w, | 102 | int unmap(struct mm_id *mm_idp, void *addr, unsigned long len) |
61 | int x, int must_succeed) | ||
62 | { | 103 | { |
63 | struct proc_mm_op protect; | 104 | int n; |
64 | int prot, n; | 105 | |
65 | 106 | if(proc_mm){ | |
66 | prot = (r ? PROT_READ : 0) | (w ? PROT_WRITE : 0) | | 107 | struct proc_mm_op unmap; |
67 | (x ? PROT_EXEC : 0); | 108 | int fd = mm_idp->u.mm_fd; |
68 | 109 | unmap = ((struct proc_mm_op) { .op = MM_MUNMAP, | |
69 | protect = ((struct proc_mm_op) { .op = MM_MPROTECT, | 110 | .u = |
70 | .u = | 111 | { .munmap = |
71 | { .mprotect = | 112 | { .addr = |
72 | { .addr = (unsigned long) addr, | 113 | (unsigned long) addr, |
73 | .len = len, | 114 | .len = len } } } ); |
74 | .prot = prot } } } ); | 115 | n = os_write_file(fd, &unmap, sizeof(unmap)); |
75 | 116 | if(n != sizeof(unmap)) { | |
76 | n = os_write_file(fd, &protect, sizeof(protect)); | 117 | if(n < 0) |
77 | if(n != sizeof(protect)) { | 118 | return(n); |
78 | if(n == 0) return(0); | 119 | else if(n > 0) |
79 | 120 | return(-EIO); | |
80 | if(must_succeed) | 121 | } |
81 | panic("protect failed, err = %d", -n); | 122 | } |
82 | 123 | else { | |
83 | return(-EIO); | 124 | int res; |
84 | } | 125 | unsigned long args[] = { (unsigned long) addr, len, 0, 0, 0, |
126 | 0 }; | ||
127 | |||
128 | res = run_syscall_stub(mm_idp, __NR_munmap, args); | ||
129 | if(res < 0) | ||
130 | printk("munmap stub failed, errno = %d\n", res); | ||
131 | } | ||
132 | |||
133 | return(0); | ||
134 | } | ||
85 | 135 | ||
86 | return(0); | 136 | int protect(struct mm_id *mm_idp, unsigned long addr, unsigned long len, |
137 | int r, int w, int x) | ||
138 | { | ||
139 | struct proc_mm_op protect; | ||
140 | int prot, n; | ||
141 | |||
142 | prot = (r ? PROT_READ : 0) | (w ? PROT_WRITE : 0) | | ||
143 | (x ? PROT_EXEC : 0); | ||
144 | |||
145 | if(proc_mm){ | ||
146 | int fd = mm_idp->u.mm_fd; | ||
147 | protect = ((struct proc_mm_op) { .op = MM_MPROTECT, | ||
148 | .u = | ||
149 | { .mprotect = | ||
150 | { .addr = | ||
151 | (unsigned long) addr, | ||
152 | .len = len, | ||
153 | .prot = prot } } } ); | ||
154 | |||
155 | n = os_write_file(fd, &protect, sizeof(protect)); | ||
156 | if(n != sizeof(protect)) | ||
157 | panic("protect failed, err = %d", -n); | ||
158 | } | ||
159 | else { | ||
160 | int res; | ||
161 | unsigned long args[] = { addr, len, prot, 0, 0, 0 }; | ||
162 | |||
163 | res = run_syscall_stub(mm_idp, __NR_mprotect, args); | ||
164 | if(res < 0) | ||
165 | panic("mprotect stub failed, errno = %d\n", res); | ||
166 | } | ||
167 | |||
168 | return(0); | ||
87 | } | 169 | } |
88 | 170 | ||
89 | void before_mem_skas(unsigned long unused) | 171 | void before_mem_skas(unsigned long unused) |
90 | { | 172 | { |
91 | } | 173 | } |
92 | |||
93 | /* | ||
94 | * Overrides for Emacs so that we follow Linus's tabbing style. | ||
95 | * Emacs will notice this stuff at the end of the file and automatically | ||
96 | * adjust the settings for this buffer only. This must remain at the end | ||
97 | * of the file. | ||
98 | * --------------------------------------------------------------------------- | ||
99 | * Local variables: | ||
100 | * c-file-style: "linux" | ||
101 | * End: | ||
102 | */ | ||
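
Note: run_syscall_stub() above is the core of the new skas0 mode: it points the child's IP at the syscall stub mapped at UML_CONFIG_STUB_CODE, loads the syscall number and arguments into the child's registers with PTRACE_SETREGS, lets the child run until the stub traps, and then reads the result back from the shared stub stack page. A self-contained illustration of the underlying ptrace technique, assuming an x86_64 host and reading the result from %rax instead of the stub data page; the real code uses UML's register-index macros and wait_stub_done():

    /* Sketch: inject a syscall into a stopped, ptraced child (x86_64).
     * Assumes the instruction after the syscall at syscall_insn_addr
     * raises SIGTRAP (e.g. int3), as UML's stub does.  Error handling
     * is trimmed for brevity. */
    #include <sys/ptrace.h>
    #include <sys/types.h>
    #include <sys/user.h>
    #include <sys/wait.h>

    static long inject_syscall(pid_t pid, long nr, long a1, long a2, long a3,
                               unsigned long syscall_insn_addr)
    {
            struct user_regs_struct saved, regs;
            int status;

            ptrace(PTRACE_GETREGS, pid, 0, &saved);
            regs = saved;
            regs.rip = syscall_insn_addr;
            regs.rax = nr;
            regs.rdi = a1;
            regs.rsi = a2;
            regs.rdx = a3;
            ptrace(PTRACE_SETREGS, pid, 0, &regs);

            ptrace(PTRACE_CONT, pid, 0, 0);
            waitpid(pid, &status, 0);       /* child stops at the trap */

            ptrace(PTRACE_GETREGS, pid, 0, &regs);
            ptrace(PTRACE_SETREGS, pid, 0, &saved);
            return regs.rax;                /* syscall return value */
    }
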
diff --git a/arch/um/kernel/skas/mmu.c b/arch/um/kernel/skas/mmu.c
index 6cb9a6d028a9..511a855c9ec0 100644
--- a/arch/um/kernel/skas/mmu.c
+++ b/arch/um/kernel/skas/mmu.c
@@ -3,46 +3,138 @@ | |||
3 | * Licensed under the GPL | 3 | * Licensed under the GPL |
4 | */ | 4 | */ |
5 | 5 | ||
6 | #include "linux/config.h" | ||
6 | #include "linux/sched.h" | 7 | #include "linux/sched.h" |
7 | #include "linux/list.h" | 8 | #include "linux/list.h" |
8 | #include "linux/spinlock.h" | 9 | #include "linux/spinlock.h" |
9 | #include "linux/slab.h" | 10 | #include "linux/slab.h" |
11 | #include "linux/errno.h" | ||
12 | #include "linux/mm.h" | ||
10 | #include "asm/current.h" | 13 | #include "asm/current.h" |
11 | #include "asm/segment.h" | 14 | #include "asm/segment.h" |
12 | #include "asm/mmu.h" | 15 | #include "asm/mmu.h" |
16 | #include "asm/pgalloc.h" | ||
17 | #include "asm/pgtable.h" | ||
13 | #include "os.h" | 18 | #include "os.h" |
14 | #include "skas.h" | 19 | #include "skas.h" |
15 | 20 | ||
21 | extern int __syscall_stub_start; | ||
22 | |||
23 | static int init_stub_pte(struct mm_struct *mm, unsigned long proc, | ||
24 | unsigned long kernel) | ||
25 | { | ||
26 | pgd_t *pgd; | ||
27 | pud_t *pud; | ||
28 | pmd_t *pmd; | ||
29 | pte_t *pte; | ||
30 | |||
31 | spin_lock(&mm->page_table_lock); | ||
32 | pgd = pgd_offset(mm, proc); | ||
33 | pud = pud_alloc(mm, pgd, proc); | ||
34 | if (!pud) | ||
35 | goto out; | ||
36 | |||
37 | pmd = pmd_alloc(mm, pud, proc); | ||
38 | if (!pmd) | ||
39 | goto out_pmd; | ||
40 | |||
41 | pte = pte_alloc_map(mm, pmd, proc); | ||
42 | if (!pte) | ||
43 | goto out_pte; | ||
44 | |||
45 | /* There's an interaction between the skas0 stub pages, stack | ||
46 | * randomization, and the BUG at the end of exit_mmap. exit_mmap | ||
47 | * checks that the number of page tables freed is the same as had | ||
48 | * been allocated. If the stack is on the last page table page, | ||
49 | * then the stack pte page will be freed, and if not, it won't. To | ||
50 | * avoid having to know where the stack is, or if the process mapped | ||
51 | * something at the top of its address space for some other reason, | ||
52 | * we set TASK_SIZE to end at the start of the last page table. | ||
53 | * This keeps exit_mmap off the last page, but introduces a leak | ||
54 | * of that page. So, we hang onto it here and free it in | ||
55 | * destroy_context_skas. | ||
56 | */ | ||
57 | |||
58 | mm->context.skas.last_page_table = pmd_page_kernel(*pmd); | ||
59 | |||
60 | *pte = mk_pte(virt_to_page(kernel), __pgprot(_PAGE_PRESENT)); | ||
61 | *pte = pte_mkexec(*pte); | ||
62 | *pte = pte_wrprotect(*pte); | ||
63 | spin_unlock(&mm->page_table_lock); | ||
64 | return(0); | ||
65 | |||
66 | out_pmd: | ||
67 | pud_free(pud); | ||
68 | out_pte: | ||
69 | pmd_free(pmd); | ||
70 | out: | ||
71 | spin_unlock(&mm->page_table_lock); | ||
72 | return(-ENOMEM); | ||
73 | } | ||
74 | |||
16 | int init_new_context_skas(struct task_struct *task, struct mm_struct *mm) | 75 | int init_new_context_skas(struct task_struct *task, struct mm_struct *mm) |
17 | { | 76 | { |
18 | int from; | 77 | struct mm_struct *cur_mm = current->mm; |
78 | struct mm_id *mm_id = &mm->context.skas.id; | ||
79 | unsigned long stack; | ||
80 | int from, ret; | ||
19 | 81 | ||
20 | if((current->mm != NULL) && (current->mm != &init_mm)) | 82 | if(proc_mm){ |
21 | from = current->mm->context.skas.mm_fd; | 83 | if((cur_mm != NULL) && (cur_mm != &init_mm)) |
22 | else from = -1; | 84 | from = cur_mm->context.skas.id.u.mm_fd; |
85 | else from = -1; | ||
23 | 86 | ||
24 | mm->context.skas.mm_fd = new_mm(from); | 87 | ret = new_mm(from); |
25 | if(mm->context.skas.mm_fd < 0){ | 88 | if(ret < 0){ |
26 | printk("init_new_context_skas - new_mm failed, errno = %d\n", | 89 | printk("init_new_context_skas - new_mm failed, " |
27 | mm->context.skas.mm_fd); | 90 | "errno = %d\n", ret); |
28 | return(mm->context.skas.mm_fd); | 91 | return ret; |
92 | } | ||
93 | mm_id->u.mm_fd = ret; | ||
29 | } | 94 | } |
95 | else { | ||
96 | /* This zeros the entry that pgd_alloc didn't, needed since | ||
97 | * we are about to reinitialize it, and want mm.nr_ptes to | ||
98 | * be accurate. | ||
99 | */ | ||
100 | mm->pgd[USER_PTRS_PER_PGD] = __pgd(0); | ||
30 | 101 | ||
31 | return(0); | 102 | ret = init_stub_pte(mm, CONFIG_STUB_CODE, |
103 | (unsigned long) &__syscall_stub_start); | ||
104 | if(ret) | ||
105 | goto out; | ||
106 | |||
107 | ret = -ENOMEM; | ||
108 | stack = get_zeroed_page(GFP_KERNEL); | ||
109 | if(stack == 0) | ||
110 | goto out; | ||
111 | mm_id->stack = stack; | ||
112 | |||
113 | ret = init_stub_pte(mm, CONFIG_STUB_DATA, stack); | ||
114 | if(ret) | ||
115 | goto out_free; | ||
116 | |||
117 | mm->nr_ptes--; | ||
118 | mm_id->u.pid = start_userspace(stack); | ||
119 | } | ||
120 | |||
121 | return 0; | ||
122 | |||
123 | out_free: | ||
124 | free_page(mm_id->stack); | ||
125 | out: | ||
126 | return ret; | ||
32 | } | 127 | } |
33 | 128 | ||
34 | void destroy_context_skas(struct mm_struct *mm) | 129 | void destroy_context_skas(struct mm_struct *mm) |
35 | { | 130 | { |
36 | os_close_file(mm->context.skas.mm_fd); | 131 | struct mmu_context_skas *mmu = &mm->context.skas; |
37 | } | ||
38 | 132 | ||
39 | /* | 133 | if(proc_mm) |
40 | * Overrides for Emacs so that we follow Linus's tabbing style. | 134 | os_close_file(mmu->id.u.mm_fd); |
41 | * Emacs will notice this stuff at the end of the file and automatically | 135 | else { |
42 | * adjust the settings for this buffer only. This must remain at the end | 136 | os_kill_ptraced_process(mmu->id.u.pid, 1); |
43 | * of the file. | 137 | free_page(mmu->id.stack); |
44 | * --------------------------------------------------------------------------- | 138 | free_page(mmu->last_page_table); |
45 | * Local variables: | 139 | } |
46 | * c-file-style: "linux" | 140 | } |
47 | * End: | ||
48 | */ | ||
diff --git a/arch/um/kernel/skas/process.c b/arch/um/kernel/skas/process.c
index 773cd2b525fc..1647abb0d1aa 100644
--- a/arch/um/kernel/skas/process.c
+++ b/arch/um/kernel/skas/process.c
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (C) 2002 Jeff Dike (jdike@karaya.com) | 2 | * Copyright (C) 2002- 2004 Jeff Dike (jdike@addtoit.com) |
3 | * Licensed under the GPL | 3 | * Licensed under the GPL |
4 | */ | 4 | */ |
5 | 5 | ||
@@ -14,6 +14,7 @@ | |||
14 | #include <sys/mman.h> | 14 | #include <sys/mman.h> |
15 | #include <sys/user.h> | 15 | #include <sys/user.h> |
16 | #include <asm/unistd.h> | 16 | #include <asm/unistd.h> |
17 | #include <asm/types.h> | ||
17 | #include "user.h" | 18 | #include "user.h" |
18 | #include "ptrace_user.h" | 19 | #include "ptrace_user.h" |
19 | #include "time_user.h" | 20 | #include "time_user.h" |
@@ -21,13 +22,17 @@ | |||
21 | #include "user_util.h" | 22 | #include "user_util.h" |
22 | #include "kern_util.h" | 23 | #include "kern_util.h" |
23 | #include "skas.h" | 24 | #include "skas.h" |
25 | #include "mm_id.h" | ||
24 | #include "sysdep/sigcontext.h" | 26 | #include "sysdep/sigcontext.h" |
27 | #include "sysdep/stub.h" | ||
25 | #include "os.h" | 28 | #include "os.h" |
26 | #include "proc_mm.h" | 29 | #include "proc_mm.h" |
27 | #include "skas_ptrace.h" | 30 | #include "skas_ptrace.h" |
28 | #include "chan_user.h" | 31 | #include "chan_user.h" |
29 | #include "signal_user.h" | 32 | #include "signal_user.h" |
30 | #include "registers.h" | 33 | #include "registers.h" |
34 | #include "mem.h" | ||
35 | #include "uml-config.h" | ||
31 | #include "process.h" | 36 | #include "process.h" |
32 | 37 | ||
33 | int is_skas_winch(int pid, int fd, void *data) | 38 | int is_skas_winch(int pid, int fd, void *data) |
@@ -39,20 +44,55 @@ int is_skas_winch(int pid, int fd, void *data) | |||
39 | return(1); | 44 | return(1); |
40 | } | 45 | } |
41 | 46 | ||
42 | void get_skas_faultinfo(int pid, struct faultinfo * fi) | 47 | void wait_stub_done(int pid, int sig, char * fname) |
43 | { | 48 | { |
44 | int err; | 49 | int n, status, err; |
45 | 50 | ||
46 | err = ptrace(PTRACE_FAULTINFO, pid, 0, fi); | 51 | do { |
47 | if(err) | 52 | if ( sig != -1 ) { |
48 | panic("get_skas_faultinfo - PTRACE_FAULTINFO failed, " | 53 | err = ptrace(PTRACE_CONT, pid, 0, sig); |
49 | "errno = %d\n", errno); | 54 | if(err) |
55 | panic("%s : continue failed, errno = %d\n", | ||
56 | fname, errno); | ||
57 | } | ||
58 | sig = 0; | ||
59 | |||
60 | CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED)); | ||
61 | } while((n >= 0) && WIFSTOPPED(status) && | ||
62 | (WSTOPSIG(status) == SIGVTALRM)); | ||
63 | |||
64 | if((n < 0) || !WIFSTOPPED(status) || | ||
65 | (WSTOPSIG(status) != SIGUSR1 && WSTOPSIG(status != SIGTRAP))){ | ||
66 | panic("%s : failed to wait for SIGUSR1/SIGTRAP, " | ||
67 | "pid = %d, n = %d, errno = %d, status = 0x%x\n", | ||
68 | fname, pid, n, errno, status); | ||
69 | } | ||
70 | } | ||
50 | 71 | ||
51 | /* Special handling for i386, which has different structs */ | 72 | void get_skas_faultinfo(int pid, struct faultinfo * fi) |
52 | if (sizeof(struct ptrace_faultinfo) < sizeof(struct faultinfo)) | 73 | { |
53 | memset((char *)fi + sizeof(struct ptrace_faultinfo), 0, | 74 | int err; |
54 | sizeof(struct faultinfo) - | 75 | |
55 | sizeof(struct ptrace_faultinfo)); | 76 | if(ptrace_faultinfo){ |
77 | err = ptrace(PTRACE_FAULTINFO, pid, 0, fi); | ||
78 | if(err) | ||
79 | panic("get_skas_faultinfo - PTRACE_FAULTINFO failed, " | ||
80 | "errno = %d\n", errno); | ||
81 | |||
82 | /* Special handling for i386, which has different structs */ | ||
83 | if (sizeof(struct ptrace_faultinfo) < sizeof(struct faultinfo)) | ||
84 | memset((char *)fi + sizeof(struct ptrace_faultinfo), 0, | ||
85 | sizeof(struct faultinfo) - | ||
86 | sizeof(struct ptrace_faultinfo)); | ||
87 | } | ||
88 | else { | ||
89 | wait_stub_done(pid, SIGSEGV, "get_skas_faultinfo"); | ||
90 | |||
91 | /* faultinfo is prepared by the stub-segv-handler at start of | ||
92 | * the stub stack page. We just have to copy it. | ||
93 | */ | ||
94 | memcpy(fi, (void *)current_stub_stack(), sizeof(*fi)); | ||
95 | } | ||
56 | } | 96 | } |
57 | 97 | ||
58 | static void handle_segv(int pid, union uml_pt_regs * regs) | 98 | static void handle_segv(int pid, union uml_pt_regs * regs) |
@@ -91,11 +131,56 @@ static void handle_trap(int pid, union uml_pt_regs *regs, int local_using_sysemu | |||
91 | handle_syscall(regs); | 131 | handle_syscall(regs); |
92 | } | 132 | } |
93 | 133 | ||
94 | static int userspace_tramp(void *arg) | 134 | extern int __syscall_stub_start; |
135 | |||
136 | static int userspace_tramp(void *stack) | ||
95 | { | 137 | { |
96 | init_new_thread_signals(0); | 138 | void *addr; |
97 | enable_timer(); | 139 | |
98 | ptrace(PTRACE_TRACEME, 0, 0, 0); | 140 | ptrace(PTRACE_TRACEME, 0, 0, 0); |
141 | |||
142 | init_new_thread_signals(1); | ||
143 | enable_timer(); | ||
144 | |||
145 | if(!proc_mm){ | ||
146 | /* This has a pte, but it can't be mapped in with the usual | ||
147 | * tlb_flush mechanism because this is part of that mechanism | ||
148 | */ | ||
149 | int fd; | ||
150 | __u64 offset; | ||
151 | |||
152 | fd = phys_mapping(to_phys(&__syscall_stub_start), &offset); | ||
153 | addr = mmap64((void *) UML_CONFIG_STUB_CODE, page_size(), | ||
154 | PROT_EXEC, MAP_FIXED | MAP_PRIVATE, fd, offset); | ||
155 | if(addr == MAP_FAILED){ | ||
156 | printk("mapping mmap stub failed, errno = %d\n", | ||
157 | errno); | ||
158 | exit(1); | ||
159 | } | ||
160 | |||
161 | if(stack != NULL){ | ||
162 | fd = phys_mapping(to_phys(stack), &offset); | ||
163 | addr = mmap((void *) UML_CONFIG_STUB_DATA, page_size(), | ||
164 | PROT_READ | PROT_WRITE, | ||
165 | MAP_FIXED | MAP_SHARED, fd, offset); | ||
166 | if(addr == MAP_FAILED){ | ||
167 | printk("mapping segfault stack failed, " | ||
168 | "errno = %d\n", errno); | ||
169 | exit(1); | ||
170 | } | ||
171 | } | ||
172 | } | ||
173 | if(!ptrace_faultinfo && (stack != NULL)){ | ||
174 | unsigned long v = UML_CONFIG_STUB_CODE + | ||
175 | (unsigned long) stub_segv_handler - | ||
176 | (unsigned long) &__syscall_stub_start; | ||
177 | |||
178 | set_sigstack((void *) UML_CONFIG_STUB_DATA, page_size()); | ||
179 | set_handler(SIGSEGV, (void *) v, SA_ONSTACK, | ||
180 | SIGIO, SIGWINCH, SIGALRM, SIGVTALRM, | ||
181 | SIGUSR1, -1); | ||
182 | } | ||
183 | |||
99 | os_stop_process(os_getpid()); | 184 | os_stop_process(os_getpid()); |
100 | return(0); | 185 | return(0); |
101 | } | 186 | } |
@@ -105,11 +190,11 @@ static int userspace_tramp(void *arg) | |||
105 | #define NR_CPUS 1 | 190 | #define NR_CPUS 1 |
106 | int userspace_pid[NR_CPUS]; | 191 | int userspace_pid[NR_CPUS]; |
107 | 192 | ||
108 | void start_userspace(int cpu) | 193 | int start_userspace(unsigned long stub_stack) |
109 | { | 194 | { |
110 | void *stack; | 195 | void *stack; |
111 | unsigned long sp; | 196 | unsigned long sp; |
112 | int pid, status, n; | 197 | int pid, status, n, flags; |
113 | 198 | ||
114 | stack = mmap(NULL, PAGE_SIZE, PROT_READ | PROT_WRITE | PROT_EXEC, | 199 | stack = mmap(NULL, PAGE_SIZE, PROT_READ | PROT_WRITE | PROT_EXEC, |
115 | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); | 200 | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); |
@@ -117,8 +202,9 @@ void start_userspace(int cpu) | |||
117 | panic("start_userspace : mmap failed, errno = %d", errno); | 202 | panic("start_userspace : mmap failed, errno = %d", errno); |
118 | sp = (unsigned long) stack + PAGE_SIZE - sizeof(void *); | 203 | sp = (unsigned long) stack + PAGE_SIZE - sizeof(void *); |
119 | 204 | ||
120 | pid = clone(userspace_tramp, (void *) sp, | 205 | flags = CLONE_FILES | SIGCHLD; |
121 | CLONE_FILES | CLONE_VM | SIGCHLD, NULL); | 206 | if(proc_mm) flags |= CLONE_VM; |
207 | pid = clone(userspace_tramp, (void *) sp, flags, (void *) stub_stack); | ||
122 | if(pid < 0) | 208 | if(pid < 0) |
123 | panic("start_userspace : clone failed, errno = %d", errno); | 209 | panic("start_userspace : clone failed, errno = %d", errno); |
124 | 210 | ||
@@ -140,7 +226,7 @@ void start_userspace(int cpu) | |||
140 | if(munmap(stack, PAGE_SIZE) < 0) | 226 | if(munmap(stack, PAGE_SIZE) < 0) |
141 | panic("start_userspace : munmap failed, errno = %d\n", errno); | 227 | panic("start_userspace : munmap failed, errno = %d\n", errno); |
142 | 228 | ||
143 | userspace_pid[cpu] = pid; | 229 | return(pid); |
144 | } | 230 | } |
145 | 231 | ||
146 | void userspace(union uml_pt_regs *regs) | 232 | void userspace(union uml_pt_regs *regs) |
@@ -174,7 +260,9 @@ void userspace(union uml_pt_regs *regs) | |||
174 | if(WIFSTOPPED(status)){ | 260 | if(WIFSTOPPED(status)){ |
175 | switch(WSTOPSIG(status)){ | 261 | switch(WSTOPSIG(status)){ |
176 | case SIGSEGV: | 262 | case SIGSEGV: |
177 | handle_segv(pid, regs); | 263 | if(PTRACE_FULL_FAULTINFO || !ptrace_faultinfo) |
264 | user_signal(SIGSEGV, regs, pid); | ||
265 | else handle_segv(pid, regs); | ||
178 | break; | 266 | break; |
179 | case SIGTRAP + 0x80: | 267 | case SIGTRAP + 0x80: |
180 | handle_trap(pid, regs, local_using_sysemu); | 268 | handle_trap(pid, regs, local_using_sysemu); |
@@ -194,6 +282,7 @@ void userspace(union uml_pt_regs *regs) | |||
194 | printk("userspace - child stopped with signal " | 282 | printk("userspace - child stopped with signal " |
195 | "%d\n", WSTOPSIG(status)); | 283 | "%d\n", WSTOPSIG(status)); |
196 | } | 284 | } |
285 | pid = userspace_pid[0]; | ||
197 | interrupt_end(); | 286 | interrupt_end(); |
198 | 287 | ||
199 | /* Avoid -ERESTARTSYS handling in host */ | 288 | /* Avoid -ERESTARTSYS handling in host */ |
@@ -334,21 +423,19 @@ void reboot_skas(void) | |||
334 | siglongjmp(initial_jmpbuf, INIT_JMP_REBOOT); | 423 | siglongjmp(initial_jmpbuf, INIT_JMP_REBOOT); |
335 | } | 424 | } |
336 | 425 | ||
337 | void switch_mm_skas(int mm_fd) | 426 | void switch_mm_skas(struct mm_id *mm_idp) |
338 | { | 427 | { |
339 | int err; | 428 | int err; |
340 | 429 | ||
341 | #warning need cpu pid in switch_mm_skas | 430 | #warning need cpu pid in switch_mm_skas |
342 | err = ptrace(PTRACE_SWITCH_MM, userspace_pid[0], 0, mm_fd); | 431 | if(proc_mm){ |
343 | if(err) | 432 | err = ptrace(PTRACE_SWITCH_MM, userspace_pid[0], 0, |
344 | panic("switch_mm_skas - PTRACE_SWITCH_MM failed, errno = %d\n", | 433 | mm_idp->u.mm_fd); |
345 | errno); | 434 | if(err) |
346 | } | 435 | panic("switch_mm_skas - PTRACE_SWITCH_MM failed, " |
347 | 436 | "errno = %d\n", errno); | |
348 | void kill_off_processes_skas(void) | 437 | } |
349 | { | 438 | else userspace_pid[0] = mm_idp->u.pid; |
350 | #warning need to loop over userspace_pids in kill_off_processes_skas | ||
351 | os_kill_ptraced_process(userspace_pid[0], 1); | ||
352 | } | 439 | } |
353 | 440 | ||
354 | /* | 441 | /* |
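
Note: in skas0 mode userspace_tramp() above maps the stub code page (PROT_EXEC, at UML_CONFIG_STUB_CODE) and the stub data page (at UML_CONFIG_STUB_DATA) into the child, then registers the stub SIGSEGV handler to run on an alternate stack inside the stub data page, which is how get_skas_faultinfo() can later copy the fault information out of that page. A sketch of the generic sigaltstack + SA_ONSTACK pattern, assuming plain POSIX calls in place of UML's set_sigstack()/set_handler() wrappers:

    /* Sketch: run a SIGSEGV handler on a dedicated stack page, as
     * userspace_tramp() arranges for the skas0 stub.  "stack_page"
     * stands in for the stub data mapping. */
    #include <signal.h>
    #include <string.h>

    static int install_segv_on_stack(void *stack_page, size_t size,
                                     void (*handler)(int, siginfo_t *, void *))
    {
            stack_t ss = { .ss_sp = stack_page, .ss_size = size, .ss_flags = 0 };
            struct sigaction sa;

            if (sigaltstack(&ss, NULL) < 0)
                    return -1;

            memset(&sa, 0, sizeof(sa));
            sa.sa_sigaction = handler;
            sa.sa_flags = SA_ONSTACK | SA_SIGINFO;
            sigemptyset(&sa.sa_mask);
            return sigaction(SIGSEGV, &sa, NULL);
    }
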
diff --git a/arch/um/kernel/skas/process_kern.c b/arch/um/kernel/skas/process_kern.c
index 0a7b8aa55db8..cbabab104ac3 100644
--- a/arch/um/kernel/skas/process_kern.c
+++ b/arch/um/kernel/skas/process_kern.c
@@ -175,9 +175,12 @@ static int start_kernel_proc(void *unused) | |||
175 | return(0); | 175 | return(0); |
176 | } | 176 | } |
177 | 177 | ||
178 | extern int userspace_pid[]; | ||
179 | |||
178 | int start_uml_skas(void) | 180 | int start_uml_skas(void) |
179 | { | 181 | { |
180 | start_userspace(0); | 182 | if(proc_mm) |
183 | userspace_pid[0] = start_userspace(0); | ||
181 | 184 | ||
182 | init_new_thread_signals(1); | 185 | init_new_thread_signals(1); |
183 | 186 | ||
@@ -199,3 +202,31 @@ int thread_pid_skas(struct task_struct *task) | |||
199 | #warning Need to look up userspace_pid by cpu | 202 | #warning Need to look up userspace_pid by cpu |
200 | return(userspace_pid[0]); | 203 | return(userspace_pid[0]); |
201 | } | 204 | } |
205 | |||
206 | void kill_off_processes_skas(void) | ||
207 | { | ||
208 | if(proc_mm) | ||
209 | #warning need to loop over userspace_pids in kill_off_processes_skas | ||
210 | os_kill_ptraced_process(userspace_pid[0], 1); | ||
211 | else { | ||
212 | struct task_struct *p; | ||
213 | int pid, me; | ||
214 | |||
215 | me = os_getpid(); | ||
216 | for_each_process(p){ | ||
217 | if(p->mm == NULL) | ||
218 | continue; | ||
219 | |||
220 | pid = p->mm->context.skas.id.u.pid; | ||
221 | os_kill_ptraced_process(pid, 1); | ||
222 | } | ||
223 | } | ||
224 | } | ||
225 | |||
226 | unsigned long current_stub_stack(void) | ||
227 | { | ||
228 | if(current->mm == NULL) | ||
229 | return(0); | ||
230 | |||
231 | return(current->mm->context.skas.id.stack); | ||
232 | } | ||
diff --git a/arch/um/kernel/skas/tlb.c b/arch/um/kernel/skas/tlb.c
index 18f9a7711de1..6230999c672c 100644
--- a/arch/um/kernel/skas/tlb.c
+++ b/arch/um/kernel/skas/tlb.c
@@ -6,6 +6,7 @@ | |||
6 | 6 | ||
7 | #include "linux/stddef.h" | 7 | #include "linux/stddef.h" |
8 | #include "linux/sched.h" | 8 | #include "linux/sched.h" |
9 | #include "linux/config.h" | ||
9 | #include "linux/mm.h" | 10 | #include "linux/mm.h" |
10 | #include "asm/page.h" | 11 | #include "asm/page.h" |
11 | #include "asm/pgtable.h" | 12 | #include "asm/pgtable.h" |
@@ -17,7 +18,7 @@ | |||
17 | #include "os.h" | 18 | #include "os.h" |
18 | #include "tlb.h" | 19 | #include "tlb.h" |
19 | 20 | ||
20 | static void do_ops(int fd, struct host_vm_op *ops, int last) | 21 | static void do_ops(union mm_context *mmu, struct host_vm_op *ops, int last) |
21 | { | 22 | { |
22 | struct host_vm_op *op; | 23 | struct host_vm_op *op; |
23 | int i; | 24 | int i; |
@@ -26,18 +27,18 @@ static void do_ops(int fd, struct host_vm_op *ops, int last) | |||
26 | op = &ops[i]; | 27 | op = &ops[i]; |
27 | switch(op->type){ | 28 | switch(op->type){ |
28 | case MMAP: | 29 | case MMAP: |
29 | map(fd, op->u.mmap.addr, op->u.mmap.len, | 30 | map(&mmu->skas.id, op->u.mmap.addr, op->u.mmap.len, |
30 | op->u.mmap.r, op->u.mmap.w, op->u.mmap.x, | 31 | op->u.mmap.r, op->u.mmap.w, op->u.mmap.x, |
31 | op->u.mmap.fd, op->u.mmap.offset); | 32 | op->u.mmap.fd, op->u.mmap.offset); |
32 | break; | 33 | break; |
33 | case MUNMAP: | 34 | case MUNMAP: |
34 | unmap(fd, (void *) op->u.munmap.addr, | 35 | unmap(&mmu->skas.id, (void *) op->u.munmap.addr, |
35 | op->u.munmap.len); | 36 | op->u.munmap.len); |
36 | break; | 37 | break; |
37 | case MPROTECT: | 38 | case MPROTECT: |
38 | protect(fd, op->u.mprotect.addr, op->u.mprotect.len, | 39 | protect(&mmu->skas.id, op->u.mprotect.addr, |
39 | op->u.mprotect.r, op->u.mprotect.w, | 40 | op->u.mprotect.len, op->u.mprotect.r, |
40 | op->u.mprotect.x); | 41 | op->u.mprotect.w, op->u.mprotect.x); |
41 | break; | 42 | break; |
42 | default: | 43 | default: |
43 | printk("Unknown op type %d in do_ops\n", op->type); | 44 | printk("Unknown op type %d in do_ops\n", op->type); |
@@ -46,12 +47,15 @@ static void do_ops(int fd, struct host_vm_op *ops, int last) | |||
46 | } | 47 | } |
47 | } | 48 | } |
48 | 49 | ||
50 | extern int proc_mm; | ||
51 | |||
49 | static void fix_range(struct mm_struct *mm, unsigned long start_addr, | 52 | static void fix_range(struct mm_struct *mm, unsigned long start_addr, |
50 | unsigned long end_addr, int force) | 53 | unsigned long end_addr, int force) |
51 | { | 54 | { |
52 | int fd = mm->context.skas.mm_fd; | 55 | if(!proc_mm && (end_addr > CONFIG_STUB_START)) |
56 | end_addr = CONFIG_STUB_START; | ||
53 | 57 | ||
54 | fix_range_common(mm, start_addr, end_addr, force, fd, do_ops); | 58 | fix_range_common(mm, start_addr, end_addr, force, do_ops); |
55 | } | 59 | } |
56 | 60 | ||
57 | void __flush_tlb_one_skas(unsigned long addr) | 61 | void __flush_tlb_one_skas(unsigned long addr) |
@@ -69,16 +73,20 @@ void flush_tlb_range_skas(struct vm_area_struct *vma, unsigned long start, | |||
69 | 73 | ||
70 | void flush_tlb_mm_skas(struct mm_struct *mm) | 74 | void flush_tlb_mm_skas(struct mm_struct *mm) |
71 | { | 75 | { |
76 | unsigned long end; | ||
77 | |||
72 | /* Don't bother flushing if this address space is about to be | 78 | /* Don't bother flushing if this address space is about to be |
73 | * destroyed. | 79 | * destroyed. |
74 | */ | 80 | */ |
75 | if(atomic_read(&mm->mm_users) == 0) | 81 | if(atomic_read(&mm->mm_users) == 0) |
76 | return; | 82 | return; |
77 | 83 | ||
78 | fix_range(mm, 0, host_task_size, 0); | 84 | end = proc_mm ? task_size : CONFIG_STUB_START; |
85 | fix_range(mm, 0, end, 0); | ||
79 | } | 86 | } |
80 | 87 | ||
81 | void force_flush_all_skas(void) | 88 | void force_flush_all_skas(void) |
82 | { | 89 | { |
83 | fix_range(current->mm, 0, host_task_size, 1); | 90 | unsigned long end = proc_mm ? task_size : CONFIG_STUB_START; |
91 | fix_range(current->mm, 0, end, 1); | ||
84 | } | 92 | } |
diff --git a/arch/um/kernel/tlb.c b/arch/um/kernel/tlb.c
index eda477edfdf5..83ec8d4747fd 100644
--- a/arch/um/kernel/tlb.c
+++ b/arch/um/kernel/tlb.c
@@ -18,13 +18,15 @@ | |||
18 | #define ADD_ROUND(n, inc) (((n) + (inc)) & ~((inc) - 1)) | 18 | #define ADD_ROUND(n, inc) (((n) + (inc)) & ~((inc) - 1)) |
19 | 19 | ||
20 | void fix_range_common(struct mm_struct *mm, unsigned long start_addr, | 20 | void fix_range_common(struct mm_struct *mm, unsigned long start_addr, |
21 | unsigned long end_addr, int force, int data, | 21 | unsigned long end_addr, int force, |
22 | void (*do_ops)(int, struct host_vm_op *, int)) | 22 | void (*do_ops)(union mm_context *, struct host_vm_op *, |
23 | int)) | ||
23 | { | 24 | { |
24 | pgd_t *npgd; | 25 | pgd_t *npgd; |
25 | pud_t *npud; | 26 | pud_t *npud; |
26 | pmd_t *npmd; | 27 | pmd_t *npmd; |
27 | pte_t *npte; | 28 | pte_t *npte; |
29 | union mm_context *mmu = &mm->context; | ||
28 | unsigned long addr, end; | 30 | unsigned long addr, end; |
29 | int r, w, x; | 31 | int r, w, x; |
30 | struct host_vm_op ops[16]; | 32 | struct host_vm_op ops[16]; |
@@ -40,7 +42,7 @@ void fix_range_common(struct mm_struct *mm, unsigned long start_addr, | |||
40 | end = end_addr; | 42 | end = end_addr; |
41 | if(force || pgd_newpage(*npgd)){ | 43 | if(force || pgd_newpage(*npgd)){ |
42 | op_index = add_munmap(addr, end - addr, ops, | 44 | op_index = add_munmap(addr, end - addr, ops, |
43 | op_index, last_op, data, | 45 | op_index, last_op, mmu, |
44 | do_ops); | 46 | do_ops); |
45 | pgd_mkuptodate(*npgd); | 47 | pgd_mkuptodate(*npgd); |
46 | } | 48 | } |
@@ -55,7 +57,7 @@ void fix_range_common(struct mm_struct *mm, unsigned long start_addr, | |||
55 | end = end_addr; | 57 | end = end_addr; |
56 | if(force || pud_newpage(*npud)){ | 58 | if(force || pud_newpage(*npud)){ |
57 | op_index = add_munmap(addr, end - addr, ops, | 59 | op_index = add_munmap(addr, end - addr, ops, |
58 | op_index, last_op, data, | 60 | op_index, last_op, mmu, |
59 | do_ops); | 61 | do_ops); |
60 | pud_mkuptodate(*npud); | 62 | pud_mkuptodate(*npud); |
61 | } | 63 | } |
@@ -70,7 +72,7 @@ void fix_range_common(struct mm_struct *mm, unsigned long start_addr, | |||
70 | end = end_addr; | 72 | end = end_addr; |
71 | if(force || pmd_newpage(*npmd)){ | 73 | if(force || pmd_newpage(*npmd)){ |
72 | op_index = add_munmap(addr, end - addr, ops, | 74 | op_index = add_munmap(addr, end - addr, ops, |
73 | op_index, last_op, data, | 75 | op_index, last_op, mmu, |
74 | do_ops); | 76 | do_ops); |
75 | pmd_mkuptodate(*npmd); | 77 | pmd_mkuptodate(*npmd); |
76 | } | 78 | } |
@@ -93,21 +95,21 @@ void fix_range_common(struct mm_struct *mm, unsigned long start_addr, | |||
93 | op_index = add_mmap(addr, | 95 | op_index = add_mmap(addr, |
94 | pte_val(*npte) & PAGE_MASK, | 96 | pte_val(*npte) & PAGE_MASK, |
95 | PAGE_SIZE, r, w, x, ops, | 97 | PAGE_SIZE, r, w, x, ops, |
96 | op_index, last_op, data, | 98 | op_index, last_op, mmu, |
97 | do_ops); | 99 | do_ops); |
98 | else op_index = add_munmap(addr, PAGE_SIZE, ops, | 100 | else op_index = add_munmap(addr, PAGE_SIZE, ops, |
99 | op_index, last_op, data, | 101 | op_index, last_op, mmu, |
100 | do_ops); | 102 | do_ops); |
101 | } | 103 | } |
102 | else if(pte_newprot(*npte)) | 104 | else if(pte_newprot(*npte)) |
103 | op_index = add_mprotect(addr, PAGE_SIZE, r, w, x, ops, | 105 | op_index = add_mprotect(addr, PAGE_SIZE, r, w, x, ops, |
104 | op_index, last_op, data, | 106 | op_index, last_op, mmu, |
105 | do_ops); | 107 | do_ops); |
106 | 108 | ||
107 | *npte = pte_mkuptodate(*npte); | 109 | *npte = pte_mkuptodate(*npte); |
108 | addr += PAGE_SIZE; | 110 | addr += PAGE_SIZE; |
109 | } | 111 | } |
110 | (*do_ops)(data, ops, op_index); | 112 | (*do_ops)(mmu, ops, op_index); |
111 | } | 113 | } |
112 | 114 | ||
113 | int flush_tlb_kernel_range_common(unsigned long start, unsigned long end) | 115 | int flush_tlb_kernel_range_common(unsigned long start, unsigned long end) |
@@ -195,51 +197,6 @@ int flush_tlb_kernel_range_common(unsigned long start, unsigned long end) | |||
195 | return(updated); | 197 | return(updated); |
196 | } | 198 | } |
197 | 199 | ||
198 | void flush_tlb_page(struct vm_area_struct *vma, unsigned long address) | ||
199 | { | ||
200 | address &= PAGE_MASK; | ||
201 | flush_tlb_range(vma, address, address + PAGE_SIZE); | ||
202 | } | ||
203 | |||
204 | void flush_tlb_all(void) | ||
205 | { | ||
206 | flush_tlb_mm(current->mm); | ||
207 | } | ||
208 | |||
209 | void flush_tlb_kernel_range(unsigned long start, unsigned long end) | ||
210 | { | ||
211 | CHOOSE_MODE_PROC(flush_tlb_kernel_range_tt, | ||
212 | flush_tlb_kernel_range_common, start, end); | ||
213 | } | ||
214 | |||
215 | void flush_tlb_kernel_vm(void) | ||
216 | { | ||
217 | CHOOSE_MODE(flush_tlb_kernel_vm_tt(), | ||
218 | flush_tlb_kernel_range_common(start_vm, end_vm)); | ||
219 | } | ||
220 | |||
221 | void __flush_tlb_one(unsigned long addr) | ||
222 | { | ||
223 | CHOOSE_MODE_PROC(__flush_tlb_one_tt, __flush_tlb_one_skas, addr); | ||
224 | } | ||
225 | |||
226 | void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, | ||
227 | unsigned long end) | ||
228 | { | ||
229 | CHOOSE_MODE_PROC(flush_tlb_range_tt, flush_tlb_range_skas, vma, start, | ||
230 | end); | ||
231 | } | ||
232 | |||
233 | void flush_tlb_mm(struct mm_struct *mm) | ||
234 | { | ||
235 | CHOOSE_MODE_PROC(flush_tlb_mm_tt, flush_tlb_mm_skas, mm); | ||
236 | } | ||
237 | |||
238 | void force_flush_all(void) | ||
239 | { | ||
240 | CHOOSE_MODE(force_flush_all_tt(), force_flush_all_skas()); | ||
241 | } | ||
242 | |||
243 | pgd_t *pgd_offset_proc(struct mm_struct *mm, unsigned long address) | 200 | pgd_t *pgd_offset_proc(struct mm_struct *mm, unsigned long address) |
244 | { | 201 | { |
245 | return(pgd_offset(mm, address)); | 202 | return(pgd_offset(mm, address)); |
@@ -270,9 +227,9 @@ pte_t *addr_pte(struct task_struct *task, unsigned long addr) | |||
270 | } | 227 | } |
271 | 228 | ||
272 | int add_mmap(unsigned long virt, unsigned long phys, unsigned long len, | 229 | int add_mmap(unsigned long virt, unsigned long phys, unsigned long len, |
273 | int r, int w, int x, struct host_vm_op *ops, int index, | 230 | int r, int w, int x, struct host_vm_op *ops, int index, |
274 | int last_filled, int data, | 231 | int last_filled, union mm_context *mmu, |
275 | void (*do_ops)(int, struct host_vm_op *, int)) | 232 | void (*do_ops)(union mm_context *, struct host_vm_op *, int)) |
276 | { | 233 | { |
277 | __u64 offset; | 234 | __u64 offset; |
278 | struct host_vm_op *last; | 235 | struct host_vm_op *last; |
@@ -292,7 +249,7 @@ int add_mmap(unsigned long virt, unsigned long phys, unsigned long len, | |||
292 | } | 249 | } |
293 | 250 | ||
294 | if(index == last_filled){ | 251 | if(index == last_filled){ |
295 | (*do_ops)(data, ops, last_filled); | 252 | (*do_ops)(mmu, ops, last_filled); |
296 | index = -1; | 253 | index = -1; |
297 | } | 254 | } |
298 | 255 | ||
@@ -310,8 +267,8 @@ int add_mmap(unsigned long virt, unsigned long phys, unsigned long len, | |||
310 | } | 267 | } |
311 | 268 | ||
312 | int add_munmap(unsigned long addr, unsigned long len, struct host_vm_op *ops, | 269 | int add_munmap(unsigned long addr, unsigned long len, struct host_vm_op *ops, |
313 | int index, int last_filled, int data, | 270 | int index, int last_filled, union mm_context *mmu, |
314 | void (*do_ops)(int, struct host_vm_op *, int)) | 271 | void (*do_ops)(union mm_context *, struct host_vm_op *, int)) |
315 | { | 272 | { |
316 | struct host_vm_op *last; | 273 | struct host_vm_op *last; |
317 | 274 | ||
@@ -325,7 +282,7 @@ int add_munmap(unsigned long addr, unsigned long len, struct host_vm_op *ops, | |||
325 | } | 282 | } |
326 | 283 | ||
327 | if(index == last_filled){ | 284 | if(index == last_filled){ |
328 | (*do_ops)(data, ops, last_filled); | 285 | (*do_ops)(mmu, ops, last_filled); |
329 | index = -1; | 286 | index = -1; |
330 | } | 287 | } |
331 | 288 | ||
@@ -337,8 +294,9 @@ int add_munmap(unsigned long addr, unsigned long len, struct host_vm_op *ops, | |||
337 | } | 294 | } |
338 | 295 | ||
339 | int add_mprotect(unsigned long addr, unsigned long len, int r, int w, int x, | 296 | int add_mprotect(unsigned long addr, unsigned long len, int r, int w, int x, |
340 | struct host_vm_op *ops, int index, int last_filled, int data, | 297 | struct host_vm_op *ops, int index, int last_filled, |
341 | void (*do_ops)(int, struct host_vm_op *, int)) | 298 | union mm_context *mmu, |
299 | void (*do_ops)(union mm_context *, struct host_vm_op *, int)) | ||
342 | { | 300 | { |
343 | struct host_vm_op *last; | 301 | struct host_vm_op *last; |
344 | 302 | ||
@@ -354,7 +312,7 @@ int add_mprotect(unsigned long addr, unsigned long len, int r, int w, int x, | |||
354 | } | 312 | } |
355 | 313 | ||
356 | if(index == last_filled){ | 314 | if(index == last_filled){ |
357 | (*do_ops)(data, ops, last_filled); | 315 | (*do_ops)(mmu, ops, last_filled); |
358 | index = -1; | 316 | index = -1; |
359 | } | 317 | } |
360 | 318 | ||
@@ -367,3 +325,49 @@ int add_mprotect(unsigned long addr, unsigned long len, int r, int w, int x, | |||
367 | .x = x } } }); | 325 | .x = x } } }); |
368 | return(index); | 326 | return(index); |
369 | } | 327 | } |
328 | |||
329 | void flush_tlb_page(struct vm_area_struct *vma, unsigned long address) | ||
330 | { | ||
331 | address &= PAGE_MASK; | ||
332 | flush_tlb_range(vma, address, address + PAGE_SIZE); | ||
333 | } | ||
334 | |||
335 | void flush_tlb_all(void) | ||
336 | { | ||
337 | flush_tlb_mm(current->mm); | ||
338 | } | ||
339 | |||
340 | void flush_tlb_kernel_range(unsigned long start, unsigned long end) | ||
341 | { | ||
342 | CHOOSE_MODE_PROC(flush_tlb_kernel_range_tt, | ||
343 | flush_tlb_kernel_range_common, start, end); | ||
344 | } | ||
345 | |||
346 | void flush_tlb_kernel_vm(void) | ||
347 | { | ||
348 | CHOOSE_MODE(flush_tlb_kernel_vm_tt(), | ||
349 | flush_tlb_kernel_range_common(start_vm, end_vm)); | ||
350 | } | ||
351 | |||
352 | void __flush_tlb_one(unsigned long addr) | ||
353 | { | ||
354 | CHOOSE_MODE_PROC(__flush_tlb_one_tt, __flush_tlb_one_skas, addr); | ||
355 | } | ||
356 | |||
357 | void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, | ||
358 | unsigned long end) | ||
359 | { | ||
360 | CHOOSE_MODE_PROC(flush_tlb_range_tt, flush_tlb_range_skas, vma, start, | ||
361 | end); | ||
362 | } | ||
363 | |||
364 | void flush_tlb_mm(struct mm_struct *mm) | ||
365 | { | ||
366 | CHOOSE_MODE_PROC(flush_tlb_mm_tt, flush_tlb_mm_skas, mm); | ||
367 | } | ||
368 | |||
369 | void force_flush_all(void) | ||
370 | { | ||
371 | CHOOSE_MODE(force_flush_all_tt(), force_flush_all_skas()); | ||
372 | } | ||
373 | |||
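
Note: the tlb.c hunks above change only the bookkeeping handle that fix_range_common() threads through add_mmap()/add_munmap()/add_mprotect() and into do_ops() (a union mm_context * instead of a raw int), while the batching itself is unchanged: operations accumulate in a fixed-size array, do_ops() is called when the array fills, and once more at the end of the walk. A generic sketch of that batch-and-flush pattern, with illustrative stand-in types rather than the kernel's:

    /* Sketch: the batch-and-flush pattern of fix_range_common().
     * The callback receives the last valid index in the array.
     * "ctx_t" and "op_t" are illustrative stand-ins. */
    typedef struct { int type; unsigned long addr, len; } op_t;
    typedef struct { int id; } ctx_t;

    static int queue_op(op_t *ops, int index, int last_filled, op_t op,
                        ctx_t *ctx, void (*do_ops)(ctx_t *, op_t *, int))
    {
            if (index == last_filled) {     /* array full: flush it */
                    do_ops(ctx, ops, last_filled);
                    index = -1;
            }
            ops[++index] = op;
            return index;                   /* caller tracks the new index */
    }
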
diff --git a/arch/um/kernel/tt/tlb.c b/arch/um/kernel/tt/tlb.c
index 203216ad86f1..2eefb43bc9c2 100644
--- a/arch/um/kernel/tt/tlb.c
+++ b/arch/um/kernel/tt/tlb.c
@@ -17,7 +17,7 @@ | |||
17 | #include "os.h" | 17 | #include "os.h" |
18 | #include "tlb.h" | 18 | #include "tlb.h" |
19 | 19 | ||
20 | static void do_ops(int unused, struct host_vm_op *ops, int last) | 20 | static void do_ops(union mm_context *mmu, struct host_vm_op *ops, int last) |
21 | { | 21 | { |
22 | struct host_vm_op *op; | 22 | struct host_vm_op *op; |
23 | int i; | 23 | int i; |
@@ -55,7 +55,7 @@ static void fix_range(struct mm_struct *mm, unsigned long start_addr, | |||
55 | panic("fix_range fixing wrong address space, current = 0x%p", | 55 | panic("fix_range fixing wrong address space, current = 0x%p", |
56 | current); | 56 | current); |
57 | 57 | ||
58 | fix_range_common(mm, start_addr, end_addr, force, 0, do_ops); | 58 | fix_range_common(mm, start_addr, end_addr, force, do_ops); |
59 | } | 59 | } |
60 | 60 | ||
61 | atomic_t vmchange_seq = ATOMIC_INIT(1); | 61 | atomic_t vmchange_seq = ATOMIC_INIT(1); |
diff --git a/arch/um/kernel/uml.lds.S b/arch/um/kernel/uml.lds.S
index 61dfd4fef752..163476a8cb1b 100644
--- a/arch/um/kernel/uml.lds.S
+++ b/arch/um/kernel/uml.lds.S
@@ -30,6 +30,7 @@ SECTIONS | |||
30 | _einittext = .; | 30 | _einittext = .; |
31 | } | 31 | } |
32 | . = ALIGN(4096); | 32 | . = ALIGN(4096); |
33 | |||
33 | .text : | 34 | .text : |
34 | { | 35 | { |
35 | *(.text) | 36 | *(.text) |
@@ -39,6 +40,12 @@ SECTIONS | |||
39 | /* .gnu.warning sections are handled specially by elf32.em. */ | 40 | /* .gnu.warning sections are handled specially by elf32.em. */ |
40 | *(.gnu.warning) | 41 | *(.gnu.warning) |
41 | *(.gnu.linkonce.t*) | 42 | *(.gnu.linkonce.t*) |
43 | |||
44 | . = ALIGN(4096); | ||
45 | __syscall_stub_start = .; | ||
46 | *(.__syscall_stub*) | ||
47 | __syscall_stub_end = .; | ||
48 | . = ALIGN(4096); | ||
42 | } | 49 | } |
43 | 50 | ||
44 | #include "asm/common.lds.S" | 51 | #include "asm/common.lds.S" |