author		Jeff Dike <jdike@addtoit.com>	2006-09-27 04:50:40 -0400
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-09-27 11:26:16 -0400
commit		995473aec0be1d9993205accc03e19d32d4e4a2a
tree		ab945c35f29b001ae1f42285e67bba082f446293 /arch/um/kernel
parent		3c9173509985b957bea692ea887a8a0e5055cfe8
[PATCH] uml: file renaming
Move some foo_kern.c files to foo.c now that the old foo.c files are out
of the way.
Also cleaned up some whitespace and an emacs formatting comment.
Signed-off-by: Jeff Dike <jdike@addtoit.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'arch/um/kernel')
-rw-r--r--	arch/um/kernel/Makefile	2
-rw-r--r--	arch/um/kernel/process.c (renamed from arch/um/kernel/process_kern.c)	33
-rw-r--r--	arch/um/kernel/skas/Makefile	3
-rw-r--r--	arch/um/kernel/skas/exec.c	30
-rw-r--r--	arch/um/kernel/skas/exec_kern.c	41
-rw-r--r--	arch/um/kernel/skas/process.c	217
-rw-r--r--	arch/um/kernel/skas/process_kern.c	529
7 files changed, 663 insertions, 192 deletions
diff --git a/arch/um/kernel/Makefile b/arch/um/kernel/Makefile
index a2d93065b2d0..6fa63a2a89e3 100644
--- a/arch/um/kernel/Makefile
+++ b/arch/um/kernel/Makefile
@@ -7,7 +7,7 @@ extra-y := vmlinux.lds
 clean-files :=
 
 obj-y = config.o exec.o exitcode.o init_task.o irq.o ksyms.o mem.o \
-	physmem.o process_kern.o ptrace.o reboot.o resource.o sigio.o \
+	physmem.o process.o ptrace.o reboot.o resource.o sigio.o \
 	signal.o smp.o syscall.o sysrq.o time.o tlb.o trap.o uaccess.o \
 	um_arch.o umid.o
 
diff --git a/arch/um/kernel/process_kern.c b/arch/um/kernel/process.c
index 537895d68ad1..fe6c64abda5b 100644
--- a/arch/um/kernel/process_kern.c
+++ b/arch/um/kernel/process.c
@@ -1,10 +1,9 @@
 /*
  * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
  * Copyright 2003 PathScale, Inc.
  * Licensed under the GPL
  */
 
-#include "linux/config.h"
 #include "linux/kernel.h"
 #include "linux/sched.h"
 #include "linux/interrupt.h"
@@ -113,11 +112,11 @@ void set_current(void *t)
 
 void *_switch_to(void *prev, void *next, void *last)
 {
 	struct task_struct *from = prev;
 	struct task_struct *to= next;
 
 	to->thread.prev_sched = from;
 	set_current(to);
 
 	do {
 		current->thread.saved_task = NULL ;
@@ -128,7 +127,7 @@ void *_switch_to(void *prev, void *next, void *last)
 		prev= current;
 	} while(current->thread.saved_task);
 
 	return(current->thread.prev_sched);
 
 }
 
@@ -142,19 +141,19 @@ void release_thread(struct task_struct *task)
 {
 	CHOOSE_MODE(release_thread_tt(task), release_thread_skas(task));
 }
 
 void exit_thread(void)
 {
 	unprotect_stack((unsigned long) current_thread);
 }
 
 void *get_current(void)
 {
 	return(current);
 }
 
 int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
 		unsigned long stack_top, struct task_struct * p,
 		struct pt_regs *regs)
 {
 	int ret;
@@ -183,11 +182,11 @@ void initial_thread_cb(void (*proc)(void *), void *arg)
 	int save_kmalloc_ok = kmalloc_ok;
 
 	kmalloc_ok = 0;
 	CHOOSE_MODE_PROC(initial_thread_cb_tt, initial_thread_cb_skas, proc,
 			 arg);
 	kmalloc_ok = save_kmalloc_ok;
 }
 
 unsigned long stack_sp(unsigned long page)
 {
 	return(page + PAGE_SIZE - sizeof(void *));
@@ -211,7 +210,7 @@ void default_idle(void)
 		 */
 		if(need_resched())
 			schedule();
 
 		idle_sleep(10);
 	}
 }
@@ -226,7 +225,7 @@ int page_size(void)
 	return(PAGE_SIZE);
 }
 
 void *um_virt_to_phys(struct task_struct *task, unsigned long addr,
 		      pte_t *pte_out)
 {
 	pgd_t *pgd;
@@ -235,7 +234,7 @@ void *um_virt_to_phys(struct task_struct *task, unsigned long addr,
 	pte_t *pte;
 	pte_t ptent;
 
 	if(task->mm == NULL)
 		return(ERR_PTR(-EINVAL));
 	pgd = pgd_offset(task->mm, addr);
 	if(!pgd_present(*pgd))
@@ -246,7 +245,7 @@ void *um_virt_to_phys(struct task_struct *task, unsigned long addr,
 		return(ERR_PTR(-EINVAL));
 
 	pmd = pmd_offset(pud, addr);
 	if(!pmd_present(*pmd))
 		return(ERR_PTR(-EINVAL));
 
 	pte = pte_offset_kernel(pmd, addr);
@@ -271,7 +270,7 @@ char *current_cmd(void)
 
 void force_sigbus(void)
 {
 	printk(KERN_ERR "Killing pid %d because of a lack of memory\n",
 	       current->pid);
 	lock_kernel();
 	sigaddset(&current->pending.signal, SIGBUS);
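A note on the CHOOSE_MODE/CHOOSE_MODE_PROC calls that appear throughout process.c above: they select between the tt-mode and skas-mode implementation of each operation. The following is a hedged sketch of that dispatch, not the verbatim header (the real macros live in arch/um's choose-mode.h; the UML_CONFIG_MODE_* symbols and the mode_tt flag are assumed here):

/* Sketch only: when both modes are compiled in, dispatch on the runtime
 * mode_tt flag; when a single mode is configured, collapse to that side
 * so the other implementation need not even be linked. */
#if defined(UML_CONFIG_MODE_TT) && defined(UML_CONFIG_MODE_SKAS)
#define CHOOSE_MODE(tt, skas) (mode_tt ? (tt) : (skas))
#elif defined(UML_CONFIG_MODE_TT)
#define CHOOSE_MODE(tt, skas) (tt)
#else
#define CHOOSE_MODE(tt, skas) (skas)
#endif

/* Function-call form, as in
 * CHOOSE_MODE_PROC(release_thread_tt, release_thread_skas, task): */
#define CHOOSE_MODE_PROC(tt, skas, args...) \
	CHOOSE_MODE(tt(args), skas(args))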
diff --git a/arch/um/kernel/skas/Makefile b/arch/um/kernel/skas/Makefile
index ea3a8e409a6e..3e3fa7e7e3cf 100644
--- a/arch/um/kernel/skas/Makefile
+++ b/arch/um/kernel/skas/Makefile
@@ -3,8 +3,7 @@
 # Licensed under the GPL
 #
 
-obj-y := clone.o exec_kern.o mem.o mmu.o process_kern.o \
-	syscall.o tlb.o uaccess.o
+obj-y := clone.o exec.o mem.o mmu.o process.o syscall.o tlb.o uaccess.o
 
 # clone.o is in the stub, so it can't be built with profiling
 # GCC hardened also auto-enables -fpic, but we need %ebx so it can't work ->
diff --git a/arch/um/kernel/skas/exec.c b/arch/um/kernel/skas/exec.c
new file mode 100644
index 000000000000..54b795951372
--- /dev/null
+++ b/arch/um/kernel/skas/exec.c
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
+ * Licensed under the GPL
+ */
+
+#include "linux/kernel.h"
+#include "asm/current.h"
+#include "asm/page.h"
+#include "asm/signal.h"
+#include "asm/ptrace.h"
+#include "asm/uaccess.h"
+#include "asm/mmu_context.h"
+#include "tlb.h"
+#include "skas.h"
+#include "um_mmu.h"
+#include "os.h"
+
+void flush_thread_skas(void)
+{
+	force_flush_all();
+	switch_mm_skas(&current->mm->context.skas.id);
+}
+
+void start_thread_skas(struct pt_regs *regs, unsigned long eip,
+		       unsigned long esp)
+{
+	set_fs(USER_DS);
+	PT_REGS_IP(regs) = eip;
+	PT_REGS_SP(regs) = esp;
+}
diff --git a/arch/um/kernel/skas/exec_kern.c b/arch/um/kernel/skas/exec_kern.c
deleted file mode 100644
index 77ed7bbab219..000000000000
--- a/arch/um/kernel/skas/exec_kern.c
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
- * Licensed under the GPL
- */
-
-#include "linux/kernel.h"
-#include "asm/current.h"
-#include "asm/page.h"
-#include "asm/signal.h"
-#include "asm/ptrace.h"
-#include "asm/uaccess.h"
-#include "asm/mmu_context.h"
-#include "tlb.h"
-#include "skas.h"
-#include "um_mmu.h"
-#include "os.h"
-
-void flush_thread_skas(void)
-{
-	force_flush_all();
-	switch_mm_skas(&current->mm->context.skas.id);
-}
-
-void start_thread_skas(struct pt_regs *regs, unsigned long eip,
-		       unsigned long esp)
-{
-	set_fs(USER_DS);
-	PT_REGS_IP(regs) = eip;
-	PT_REGS_SP(regs) = esp;
-}
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only. This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */
diff --git a/arch/um/kernel/skas/process.c b/arch/um/kernel/skas/process.c
new file mode 100644
index 000000000000..ae4fa71d3b8b
--- /dev/null
+++ b/arch/um/kernel/skas/process.c
@@ -0,0 +1,217 @@
+/*
+ * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
+ * Licensed under the GPL
+ */
+
+#include "linux/sched.h"
+#include "linux/slab.h"
+#include "linux/ptrace.h"
+#include "linux/proc_fs.h"
+#include "linux/file.h"
+#include "linux/errno.h"
+#include "linux/init.h"
+#include "asm/uaccess.h"
+#include "asm/atomic.h"
+#include "kern_util.h"
+#include "skas.h"
+#include "os.h"
+#include "user_util.h"
+#include "tlb.h"
+#include "kern.h"
+#include "mode.h"
+#include "registers.h"
+
+void switch_to_skas(void *prev, void *next)
+{
+	struct task_struct *from, *to;
+
+	from = prev;
+	to = next;
+
+	/* XXX need to check runqueues[cpu].idle */
+	if(current->pid == 0)
+		switch_timers(0);
+
+	switch_threads(&from->thread.mode.skas.switch_buf,
+		       &to->thread.mode.skas.switch_buf);
+
+	arch_switch_to_skas(current->thread.prev_sched, current);
+
+	if(current->pid == 0)
+		switch_timers(1);
+}
+
+extern void schedule_tail(struct task_struct *prev);
+
+/* This is called magically, by its address being stuffed in a jmp_buf
+ * and being longjmp-d to.
+ */
+void new_thread_handler(void)
+{
+	int (*fn)(void *), n;
+	void *arg;
+
+	if(current->thread.prev_sched != NULL)
+		schedule_tail(current->thread.prev_sched);
+	current->thread.prev_sched = NULL;
+
+	fn = current->thread.request.u.thread.proc;
+	arg = current->thread.request.u.thread.arg;
+
+	/* The return value is 1 if the kernel thread execs a process,
+	 * 0 if it just exits
+	 */
+	n = run_kernel_thread(fn, arg, &current->thread.exec_buf);
+	if(n == 1){
+		/* Handle any immediate reschedules or signals */
+		interrupt_end();
+		userspace(&current->thread.regs.regs);
+	}
+	else do_exit(0);
+}
+
+void release_thread_skas(struct task_struct *task)
+{
+}
+
+/* Called magically, see new_thread_handler above */
+void fork_handler(void)
+{
+	force_flush_all();
+	if(current->thread.prev_sched == NULL)
+		panic("blech");
+
+	schedule_tail(current->thread.prev_sched);
+
+	/* XXX: if interrupt_end() calls schedule, this call to
+	 * arch_switch_to_skas isn't needed. We could want to apply this to
+	 * improve performance. -bb */
+	arch_switch_to_skas(current->thread.prev_sched, current);
+
+	current->thread.prev_sched = NULL;
+
+	/* Handle any immediate reschedules or signals */
+	interrupt_end();
+
+	userspace(&current->thread.regs.regs);
+}
+
+int copy_thread_skas(int nr, unsigned long clone_flags, unsigned long sp,
+		     unsigned long stack_top, struct task_struct * p,
+		     struct pt_regs *regs)
+{
+	void (*handler)(void);
+
+	if(current->thread.forking){
+		memcpy(&p->thread.regs.regs.skas, &regs->regs.skas,
+		       sizeof(p->thread.regs.regs.skas));
+		REGS_SET_SYSCALL_RETURN(p->thread.regs.regs.skas.regs, 0);
+		if(sp != 0) REGS_SP(p->thread.regs.regs.skas.regs) = sp;
+
+		handler = fork_handler;
+
+		arch_copy_thread(&current->thread.arch, &p->thread.arch);
+	}
+	else {
+		init_thread_registers(&p->thread.regs.regs);
+		p->thread.request.u.thread = current->thread.request.u.thread;
+		handler = new_thread_handler;
+	}
+
+	new_thread(task_stack_page(p), &p->thread.mode.skas.switch_buf,
+		   handler);
+	return(0);
+}
+
+int new_mm(unsigned long stack)
+{
+	int fd;
+
+	fd = os_open_file("/proc/mm", of_cloexec(of_write(OPENFLAGS())), 0);
+	if(fd < 0)
+		return(fd);
+
+	if(skas_needs_stub)
+		map_stub_pages(fd, CONFIG_STUB_CODE, CONFIG_STUB_DATA, stack);
+
+	return(fd);
+}
+
+void init_idle_skas(void)
+{
+	cpu_tasks[current_thread->cpu].pid = os_getpid();
+	default_idle();
+}
+
+extern void start_kernel(void);
+
+static int start_kernel_proc(void *unused)
+{
+	int pid;
+
+	block_signals();
+	pid = os_getpid();
+
+	cpu_tasks[0].pid = pid;
+	cpu_tasks[0].task = current;
+#ifdef CONFIG_SMP
+	cpu_online_map = cpumask_of_cpu(0);
+#endif
+	start_kernel();
+	return(0);
+}
+
+extern int userspace_pid[];
+
+int start_uml_skas(void)
+{
+	if(proc_mm)
+		userspace_pid[0] = start_userspace(0);
+
+	init_new_thread_signals();
+
+	init_task.thread.request.u.thread.proc = start_kernel_proc;
+	init_task.thread.request.u.thread.arg = NULL;
+	return(start_idle_thread(task_stack_page(&init_task),
+				 &init_task.thread.mode.skas.switch_buf));
+}
+
+int external_pid_skas(struct task_struct *task)
+{
+#warning Need to look up userspace_pid by cpu
+	return(userspace_pid[0]);
+}
+
+int thread_pid_skas(struct task_struct *task)
+{
+#warning Need to look up userspace_pid by cpu
+	return(userspace_pid[0]);
+}
+
+void kill_off_processes_skas(void)
+{
+	if(proc_mm)
+#warning need to loop over userspace_pids in kill_off_processes_skas
+		os_kill_ptraced_process(userspace_pid[0], 1);
+	else {
+		struct task_struct *p;
+		int pid, me;
+
+		me = os_getpid();
+		for_each_process(p){
+			if(p->mm == NULL)
+				continue;
+
+			pid = p->mm->context.skas.id.u.pid;
+			os_kill_ptraced_process(pid, 1);
+		}
+	}
+}
+
+unsigned long current_stub_stack(void)
+{
+	if(current->mm == NULL)
+		return(0);
+
+	return(current->mm->context.skas.id.stack);
+}
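The comment on new_thread_handler() above ("called magically, by its address being stuffed in a jmp_buf and being longjmp-d to") and the run_kernel_thread() return-value convention can be illustrated with a small stand-alone sketch. Everything below is hypothetical user-space code, not the kernel's implementation: the real code also switches stacks via new_thread()/switch_threads(), which plain setjmp/longjmp does not capture.

#include <setjmp.h>
#include <stdio.h>

static jmp_buf exec_buf;	/* stands in for current->thread.exec_buf */

static void fake_exec(void)
{
	/* A thread function that "execs" never returns to its caller;
	 * it bails out through the jmp_buf instead. */
	longjmp(exec_buf, 1);
}

static int thread_proc(void *arg)
{
	printf("thread running: %s\n", (char *) arg);
	fake_exec();
	return 0;		/* never reached */
}

/* Returns 1 if fn escaped via the jmp_buf ("execed"), 0 if it returned. */
static int run_thread(int (*fn)(void *), void *arg)
{
	if (setjmp(exec_buf) == 0) {
		fn(arg);
		return 0;
	}
	return 1;
}

int main(void)
{
	printf("run_thread() = %d\n", run_thread(thread_proc, "hello"));
	return 0;
}

Jumping out through a jmp_buf recorded further up the stack, as fake_exec() does, is well-defined C; UML's extra trick of longjmp-ing into a freshly built context on a new stack is the part the "magic" comment refers to.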
diff --git a/arch/um/kernel/skas/process_kern.c b/arch/um/kernel/skas/process_kern.c
index c73d47c57abe..0f3d5d084dc7 100644
--- a/arch/um/kernel/skas/process_kern.c
+++ b/arch/um/kernel/skas/process_kern.c
@@ -1,217 +1,484 @@
 /*
- * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
+ * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
+ * Copyright 2003 PathScale, Inc.
  * Licensed under the GPL
  */
 
+#include "linux/config.h"
+#include "linux/kernel.h"
 #include "linux/sched.h"
+#include "linux/interrupt.h"
+#include "linux/string.h"
+#include "linux/mm.h"
 #include "linux/slab.h"
-#include "linux/ptrace.h"
-#include "linux/proc_fs.h"
-#include "linux/file.h"
-#include "linux/errno.h"
+#include "linux/utsname.h"
+#include "linux/fs.h"
+#include "linux/utime.h"
+#include "linux/smp_lock.h"
+#include "linux/module.h"
 #include "linux/init.h"
+#include "linux/capability.h"
+#include "linux/vmalloc.h"
+#include "linux/spinlock.h"
+#include "linux/proc_fs.h"
+#include "linux/ptrace.h"
+#include "linux/random.h"
+#include "linux/personality.h"
+#include "asm/unistd.h"
+#include "asm/mman.h"
+#include "asm/segment.h"
+#include "asm/stat.h"
+#include "asm/pgtable.h"
+#include "asm/processor.h"
+#include "asm/tlbflush.h"
 #include "asm/uaccess.h"
-#include "asm/atomic.h"
-#include "kern_util.h"
-#include "skas.h"
-#include "os.h"
+#include "asm/user.h"
 #include "user_util.h"
-#include "tlb.h"
+#include "kern_util.h"
 #include "kern.h"
+#include "signal_kern.h"
+#include "init.h"
+#include "irq_user.h"
+#include "mem_user.h"
+#include "tlb.h"
+#include "frame_kern.h"
+#include "sigcontext.h"
+#include "os.h"
 #include "mode.h"
-#include "registers.h"
+#include "mode_kern.h"
+#include "choose-mode.h"
+
+/* This is a per-cpu array. A processor only modifies its entry and it only
+ * cares about its entry, so it's OK if another processor is modifying its
+ * entry.
+ */
+struct cpu_task cpu_tasks[NR_CPUS] = { [0 ... NR_CPUS - 1] = { -1, NULL } };
+
+int external_pid(void *t)
+{
+	struct task_struct *task = t ? t : current;
+
+	return(CHOOSE_MODE_PROC(external_pid_tt, external_pid_skas, task));
+}
+
+int pid_to_processor_id(int pid)
+{
+	int i;
 
-void switch_to_skas(void *prev, void *next)
+	for(i = 0; i < ncpus; i++){
+		if(cpu_tasks[i].pid == pid) return(i);
+	}
+	return(-1);
+}
+
+void free_stack(unsigned long stack, int order)
 {
-	struct task_struct *from, *to;
+	free_pages(stack, order);
+}
 
-	from = prev;
-	to = next;
+unsigned long alloc_stack(int order, int atomic)
+{
+	unsigned long page;
+	gfp_t flags = GFP_KERNEL;
 
-	/* XXX need to check runqueues[cpu].idle */
-	if(current->pid == 0)
-		switch_timers(0);
+	if (atomic)
+		flags = GFP_ATOMIC;
+	page = __get_free_pages(flags, order);
+	if(page == 0)
+		return(0);
+	stack_protections(page);
+	return(page);
+}
 
-	switch_threads(&from->thread.mode.skas.switch_buf,
-		       &to->thread.mode.skas.switch_buf);
+int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
+{
+	int pid;
+
+	current->thread.request.u.thread.proc = fn;
+	current->thread.request.u.thread.arg = arg;
+	pid = do_fork(CLONE_VM | CLONE_UNTRACED | flags, 0,
+		      &current->thread.regs, 0, NULL, NULL);
+	if(pid < 0)
+		panic("do_fork failed in kernel_thread, errno = %d", pid);
+	return(pid);
+}
 
-	arch_switch_to_skas(current->thread.prev_sched, current);
+void set_current(void *t)
+{
+	struct task_struct *task = t;
 
-	if(current->pid == 0)
-		switch_timers(1);
+	cpu_tasks[task_thread_info(task)->cpu] = ((struct cpu_task)
+		{ external_pid(task), task });
 }
 
-extern void schedule_tail(struct task_struct *prev);
+void *_switch_to(void *prev, void *next, void *last)
+{
+	struct task_struct *from = prev;
+	struct task_struct *to= next;
 
-/* This is called magically, by its address being stuffed in a jmp_buf
- * and being longjmp-d to.
- */
-void new_thread_handler(void)
+	to->thread.prev_sched = from;
+	set_current(to);
+
+	do {
+		current->thread.saved_task = NULL ;
+		CHOOSE_MODE_PROC(switch_to_tt, switch_to_skas, prev, next);
+		if(current->thread.saved_task)
+			show_regs(&(current->thread.regs));
+		next= current->thread.saved_task;
+		prev= current;
+	} while(current->thread.saved_task);
+
+	return(current->thread.prev_sched);
+
+}
+
+void interrupt_end(void)
 {
-	int (*fn)(void *), n;
-	void *arg;
+	if(need_resched()) schedule();
+	if(test_tsk_thread_flag(current, TIF_SIGPENDING)) do_signal();
+}
 
-	if(current->thread.prev_sched != NULL)
-		schedule_tail(current->thread.prev_sched);
-	current->thread.prev_sched = NULL;
+void release_thread(struct task_struct *task)
+{
+	CHOOSE_MODE(release_thread_tt(task), release_thread_skas(task));
+}
 
-	fn = current->thread.request.u.thread.proc;
-	arg = current->thread.request.u.thread.arg;
+void exit_thread(void)
+{
+	unprotect_stack((unsigned long) current_thread);
+}
 
-	/* The return value is 1 if the kernel thread execs a process,
-	 * 0 if it just exits
+void *get_current(void)
+{
+	return(current);
+}
+
+int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
+		unsigned long stack_top, struct task_struct * p,
+		struct pt_regs *regs)
+{
+	int ret;
+
+	p->thread = (struct thread_struct) INIT_THREAD;
+	ret = CHOOSE_MODE_PROC(copy_thread_tt, copy_thread_skas, nr,
+			       clone_flags, sp, stack_top, p, regs);
+
+	if (ret || !current->thread.forking)
+		goto out;
+
+	clear_flushed_tls(p);
+
+	/*
+	 * Set a new TLS for the child thread?
 	 */
-	n = run_kernel_thread(fn, arg, &current->thread.exec_buf);
-	if(n == 1){
-		/* Handle any immediate reschedules or signals */
-		interrupt_end();
-		userspace(&current->thread.regs.regs);
+	if (clone_flags & CLONE_SETTLS)
+		ret = arch_copy_tls(p);
+
+out:
+	return ret;
+}
+
+void initial_thread_cb(void (*proc)(void *), void *arg)
+{
+	int save_kmalloc_ok = kmalloc_ok;
+
+	kmalloc_ok = 0;
+	CHOOSE_MODE_PROC(initial_thread_cb_tt, initial_thread_cb_skas, proc,
+			 arg);
+	kmalloc_ok = save_kmalloc_ok;
+}
+
+unsigned long stack_sp(unsigned long page)
+{
+	return(page + PAGE_SIZE - sizeof(void *));
+}
+
+int current_pid(void)
+{
+	return(current->pid);
+}
+
+void default_idle(void)
+{
+	CHOOSE_MODE(uml_idle_timer(), (void) 0);
+
+	while(1){
+		/* endless idle loop with no priority at all */
+
+		/*
+		 * although we are an idle CPU, we do not want to
+		 * get into the scheduler unnecessarily.
+		 */
+		if(need_resched())
+			schedule();
+
+		idle_sleep(10);
 	}
-	else do_exit(0);
 }
 
-void release_thread_skas(struct task_struct *task)
+void cpu_idle(void)
 {
+	CHOOSE_MODE(init_idle_tt(), init_idle_skas());
 }
 
-/* Called magically, see new_thread_handler above */
-void fork_handler(void)
+int page_size(void)
 {
-	force_flush_all();
-	if(current->thread.prev_sched == NULL)
-		panic("blech");
+	return(PAGE_SIZE);
+}
 
-	schedule_tail(current->thread.prev_sched);
+void *um_virt_to_phys(struct task_struct *task, unsigned long addr,
+		      pte_t *pte_out)
+{
+	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd;
+	pte_t *pte;
+	pte_t ptent;
+
+	if(task->mm == NULL)
+		return(ERR_PTR(-EINVAL));
+	pgd = pgd_offset(task->mm, addr);
+	if(!pgd_present(*pgd))
+		return(ERR_PTR(-EINVAL));
+
+	pud = pud_offset(pgd, addr);
+	if(!pud_present(*pud))
+		return(ERR_PTR(-EINVAL));
+
+	pmd = pmd_offset(pud, addr);
+	if(!pmd_present(*pmd))
+		return(ERR_PTR(-EINVAL));
+
+	pte = pte_offset_kernel(pmd, addr);
+	ptent = *pte;
+	if(!pte_present(ptent))
+		return(ERR_PTR(-EINVAL));
+
+	if(pte_out != NULL)
+		*pte_out = ptent;
+	return((void *) (pte_val(ptent) & PAGE_MASK) + (addr & ~PAGE_MASK));
+}
 
-	/* XXX: if interrupt_end() calls schedule, this call to
-	 * arch_switch_to_skas isn't needed. We could want to apply this to
-	 * improve performance. -bb */
-	arch_switch_to_skas(current->thread.prev_sched, current);
+char *current_cmd(void)
+{
+#if defined(CONFIG_SMP) || defined(CONFIG_HIGHMEM)
+	return("(Unknown)");
+#else
+	void *addr = um_virt_to_phys(current, current->mm->arg_start, NULL);
+	return IS_ERR(addr) ? "(Unknown)": __va((unsigned long) addr);
+#endif
+}
 
-	current->thread.prev_sched = NULL;
+void force_sigbus(void)
+{
+	printk(KERN_ERR "Killing pid %d because of a lack of memory\n",
+	       current->pid);
+	lock_kernel();
+	sigaddset(&current->pending.signal, SIGBUS);
+	recalc_sigpending();
+	current->flags |= PF_SIGNALED;
+	do_exit(SIGBUS | 0x80);
+}
 
-	/* Handle any immediate reschedules or signals */
-	interrupt_end();
+void dump_thread(struct pt_regs *regs, struct user *u)
+{
+}
 
-	userspace(&current->thread.regs.regs);
+void enable_hlt(void)
+{
+	panic("enable_hlt");
 }
 
-int copy_thread_skas(int nr, unsigned long clone_flags, unsigned long sp,
-		     unsigned long stack_top, struct task_struct * p,
-		     struct pt_regs *regs)
+EXPORT_SYMBOL(enable_hlt);
+
+void disable_hlt(void)
 {
-	void (*handler)(void);
+	panic("disable_hlt");
+}
 
-	if(current->thread.forking){
-		memcpy(&p->thread.regs.regs.skas, &regs->regs.skas,
-		       sizeof(p->thread.regs.regs.skas));
-		REGS_SET_SYSCALL_RETURN(p->thread.regs.regs.skas.regs, 0);
-		if(sp != 0) REGS_SP(p->thread.regs.regs.skas.regs) = sp;
+EXPORT_SYMBOL(disable_hlt);
 
-		handler = fork_handler;
+void *um_kmalloc(int size)
+{
+	return kmalloc(size, GFP_KERNEL);
+}
 
-		arch_copy_thread(&current->thread.arch, &p->thread.arch);
-	}
-	else {
-		init_thread_registers(&p->thread.regs.regs);
-		p->thread.request.u.thread = current->thread.request.u.thread;
-		handler = new_thread_handler;
-	}
+void *um_kmalloc_atomic(int size)
+{
+	return kmalloc(size, GFP_ATOMIC);
+}
 
-	new_thread(task_stack_page(p), &p->thread.mode.skas.switch_buf,
-		   handler);
-	return(0);
+void *um_vmalloc(int size)
+{
+	return vmalloc(size);
 }
 
-int new_mm(unsigned long stack)
+void *um_vmalloc_atomic(int size)
 {
-	int fd;
+	return __vmalloc(size, GFP_ATOMIC | __GFP_HIGHMEM, PAGE_KERNEL);
+}
 
-	fd = os_open_file("/proc/mm", of_cloexec(of_write(OPENFLAGS())), 0);
-	if(fd < 0)
-		return(fd);
+int __cant_sleep(void) {
+	return in_atomic() || irqs_disabled() || in_interrupt();
+	/* Is in_interrupt() really needed? */
+}
 
-	if(skas_needs_stub)
-		map_stub_pages(fd, CONFIG_STUB_CODE, CONFIG_STUB_DATA, stack);
+unsigned long get_fault_addr(void)
+{
+	return((unsigned long) current->thread.fault_addr);
+}
 
-	return(fd);
+EXPORT_SYMBOL(get_fault_addr);
+
+void not_implemented(void)
+{
+	printk(KERN_DEBUG "Something isn't implemented in here\n");
 }
 
-void init_idle_skas(void)
+EXPORT_SYMBOL(not_implemented);
+
+int user_context(unsigned long sp)
 {
-	cpu_tasks[current_thread->cpu].pid = os_getpid();
-	default_idle();
+	unsigned long stack;
+
+	stack = sp & (PAGE_MASK << CONFIG_KERNEL_STACK_ORDER);
+	return(stack != (unsigned long) current_thread);
 }
 
-extern void start_kernel(void);
+extern exitcall_t __uml_exitcall_begin, __uml_exitcall_end;
 
-static int start_kernel_proc(void *unused)
+void do_uml_exitcalls(void)
 {
-	int pid;
+	exitcall_t *call;
+
+	call = &__uml_exitcall_end;
+	while (--call >= &__uml_exitcall_begin)
+		(*call)();
+}
 
-	block_signals();
-	pid = os_getpid();
+char *uml_strdup(char *string)
+{
+	return kstrdup(string, GFP_KERNEL);
+}
 
-	cpu_tasks[0].pid = pid;
-	cpu_tasks[0].task = current;
+int copy_to_user_proc(void __user *to, void *from, int size)
+{
+	return(copy_to_user(to, from, size));
+}
+
+int copy_from_user_proc(void *to, void __user *from, int size)
+{
+	return(copy_from_user(to, from, size));
+}
+
+int clear_user_proc(void __user *buf, int size)
+{
+	return(clear_user(buf, size));
+}
+
+int strlen_user_proc(char __user *str)
+{
+	return(strlen_user(str));
+}
+
+int smp_sigio_handler(void)
+{
 #ifdef CONFIG_SMP
-	cpu_online_map = cpumask_of_cpu(0);
+	int cpu = current_thread->cpu;
+	IPI_handler(cpu);
+	if(cpu != 0)
+		return(1);
 #endif
-	start_kernel();
 	return(0);
 }
 
-extern int userspace_pid[];
-
-int start_uml_skas(void)
+int cpu(void)
 {
-	if(proc_mm)
-		userspace_pid[0] = start_userspace(0);
+	return(current_thread->cpu);
+}
 
-	init_new_thread_signals();
+static atomic_t using_sysemu = ATOMIC_INIT(0);
+int sysemu_supported;
 
-	init_task.thread.request.u.thread.proc = start_kernel_proc;
-	init_task.thread.request.u.thread.arg = NULL;
-	return(start_idle_thread(task_stack_page(&init_task),
-				 &init_task.thread.mode.skas.switch_buf));
+void set_using_sysemu(int value)
+{
+	if (value > sysemu_supported)
+		return;
+	atomic_set(&using_sysemu, value);
 }
 
-int external_pid_skas(struct task_struct *task)
+int get_using_sysemu(void)
 {
-#warning Need to look up userspace_pid by cpu
-	return(userspace_pid[0]);
+	return atomic_read(&using_sysemu);
 }
 
-int thread_pid_skas(struct task_struct *task)
+static int proc_read_sysemu(char *buf, char **start, off_t offset, int size,int *eof, void *data)
 {
-#warning Need to look up userspace_pid by cpu
-	return(userspace_pid[0]);
+	if (snprintf(buf, size, "%d\n", get_using_sysemu()) < size) /*No overflow*/
+		*eof = 1;
+
+	return strlen(buf);
 }
 
-void kill_off_processes_skas(void)
+static int proc_write_sysemu(struct file *file,const char __user *buf, unsigned long count,void *data)
 {
-	if(proc_mm)
-#warning need to loop over userspace_pids in kill_off_processes_skas
-		os_kill_ptraced_process(userspace_pid[0], 1);
-	else {
-		struct task_struct *p;
-		int pid, me;
+	char tmp[2];
 
-		me = os_getpid();
-		for_each_process(p){
-			if(p->mm == NULL)
-				continue;
+	if (copy_from_user(tmp, buf, 1))
+		return -EFAULT;
 
-			pid = p->mm->context.skas.id.u.pid;
-			os_kill_ptraced_process(pid, 1);
-		}
+	if (tmp[0] >= '0' && tmp[0] <= '2')
+		set_using_sysemu(tmp[0] - '0');
+	return count; /*We use the first char, but pretend to write everything*/
+}
+
+int __init make_proc_sysemu(void)
+{
+	struct proc_dir_entry *ent;
+	if (!sysemu_supported)
+		return 0;
+
+	ent = create_proc_entry("sysemu", 0600, &proc_root);
+
+	if (ent == NULL)
+	{
+		printk(KERN_WARNING "Failed to register /proc/sysemu\n");
+		return(0);
 	}
+
+	ent->read_proc = proc_read_sysemu;
+	ent->write_proc = proc_write_sysemu;
+
+	return 0;
 }
 
-unsigned long current_stub_stack(void)
+late_initcall(make_proc_sysemu);
+
+int singlestepping(void * t)
 {
-	if(current->mm == NULL)
+	struct task_struct *task = t ? t : current;
+
+	if ( ! (task->ptrace & PT_DTRACE) )
 		return(0);
 
-	return(current->mm->context.skas.id.stack);
+	if (task->thread.singlestep_syscall)
+		return(1);
+
+	return 2;
+}
+
+/*
+ * Only x86 and x86_64 have an arch_align_stack().
+ * All other arches have "#define arch_align_stack(x) (x)"
+ * in their asm/system.h
+ * As this is included in UML from asm-um/system-generic.h,
+ * we can use it to behave as the subarch does.
+ */
+#ifndef arch_align_stack
+unsigned long arch_align_stack(unsigned long sp)
+{
+	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
+		sp -= get_random_int() % 8192;
+	return sp & ~0xf;
 }
+#endif
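The arch_align_stack() fallback above randomizes the initial stack top by up to 8 KB and then re-aligns it to 16 bytes. The arithmetic can be checked with a stand-alone user-space snippet; rand() stands in for the kernel's get_random_int(), and the starting value is arbitrary:

#include <stdio.h>
#include <stdlib.h>

static unsigned long align_stack(unsigned long sp)
{
	sp -= (unsigned long) (rand() % 8192);	/* random downward shift, < 8 KB */
	return sp & ~0xfUL;			/* clear low bits: 16-byte alignment */
}

int main(void)
{
	unsigned long sp = 0xc0000000UL;	/* arbitrary starting stack top */

	printf("randomized sp: %#lx\n", align_stack(sp));
	return 0;
}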