Diffstat (limited to 'arch/um/kernel/process.c')

 -rw-r--r--  arch/um/kernel/process.c | 120 ++++++++++++++++++++++++++-----
 1 file changed, 99 insertions(+), 21 deletions(-)
diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
index 22ad46fd2c08..d3b9c62e73c7 100644
--- a/arch/um/kernel/process.c
+++ b/arch/um/kernel/process.c
@@ -43,8 +43,7 @@
 #include "frame_kern.h"
 #include "sigcontext.h"
 #include "os.h"
-#include "mode.h"
-#include "mode_kern.h"
+#include "skas.h"
 
 /* This is a per-cpu array. A processor only modifies its entry and it only
  * cares about its entry, so it's OK if another processor is modifying its
@@ -54,7 +53,8 @@ struct cpu_task cpu_tasks[NR_CPUS] = { [0 ... NR_CPUS - 1] = { -1, NULL } };
 
 static inline int external_pid(struct task_struct *task)
 {
-	return external_pid_skas(task);
+	/* FIXME: Need to look up userspace_pid by cpu */
+	return(userspace_pid[0]);
 }
 
 int pid_to_processor_id(int pid)
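
The FIXME above deserves a note: on SMP, each virtual CPU gets its own host process, so the external pid should be looked up by the CPU the task runs on rather than hard-wired to entry 0. A minimal sketch of that lookup, assuming the generic task_cpu() helper is the right key here (an assumption on the editor's part, not something this commit establishes):

static inline int external_pid(struct task_struct *task)
{
	/* hypothetical fix for the FIXME: key userspace_pid[] by the
	 * task's CPU instead of hard-coding CPU 0 */
	return userspace_pid[task_cpu(task)];
}
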
@@ -104,6 +104,8 @@ static inline void set_current(struct task_struct *task)
 		{ external_pid(task), task });
 }
 
+extern void arch_switch_to(struct task_struct *from, struct task_struct *to);
+
 void *_switch_to(void *prev, void *next, void *last)
 {
 	struct task_struct *from = prev;
@@ -114,7 +116,19 @@ void *_switch_to(void *prev, void *next, void *last)
 
 	do {
 		current->thread.saved_task = NULL;
-		switch_to_skas(prev, next);
+
+		/* XXX need to check runqueues[cpu].idle */
+		if(current->pid == 0)
+			switch_timers(0);
+
+		switch_threads(&from->thread.switch_buf,
+			       &to->thread.switch_buf);
+
+		arch_switch_to(current->thread.prev_sched, current);
+
+		if(current->pid == 0)
+			switch_timers(1);
+
 		if(current->thread.saved_task)
 			show_regs(&(current->thread.regs));
 		next= current->thread.saved_task;
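
This hunk inlines the old switch_to_skas(): the actual handoff is switch_threads(), which saves the outgoing task's kernel context in its switch_buf and longjmps to the incoming task's. As a rough userspace illustration of that style of switch, here is a minimal, self-contained sketch. It uses the portable ucontext API instead of raw jmp_bufs (UML pokes instruction and stack pointers directly into a jmp_buf, which is libc-specific), and every name in it is illustrative rather than taken from the UML sources:

#include <stdio.h>
#include <stdlib.h>
#include <ucontext.h>

static ucontext_t sched_ctx, task_ctx;

/* Plays the role of a new thread's handler: only ever entered by
 * switching to the freshly initialized context. */
static void task_entry(void)
{
	printf("task: running on its own stack\n");
	/* Like switch_threads(&task_buf, &sched_buf): save this
	 * context, resume the scheduler's. */
	swapcontext(&task_ctx, &sched_ctx);
	printf("task: resumed again, now returning\n");
}

int main(void)
{
	char *stack = malloc(64 * 1024);

	/* Like new_thread(): record a context whose saved IP is
	 * task_entry and whose stack is freshly allocated. */
	getcontext(&task_ctx);
	task_ctx.uc_stack.ss_sp = stack;
	task_ctx.uc_stack.ss_size = 64 * 1024;
	task_ctx.uc_link = &sched_ctx;	/* resumed when task_entry returns */
	makecontext(&task_ctx, task_entry, 0);

	/* Like switch_threads(&sched_buf, &task_buf). */
	swapcontext(&sched_ctx, &task_ctx);
	printf("sched: back from task\n");
	swapcontext(&sched_ctx, &task_ctx);
	printf("sched: task finished\n");

	free(stack);
	return 0;
}
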
@@ -133,11 +147,6 @@ void interrupt_end(void)
 		do_signal();
 }
 
-void release_thread(struct task_struct *task)
-{
-	release_thread_skas(task);
-}
-
 void exit_thread(void)
 {
 }
@@ -147,27 +156,95 @@ void *get_current(void)
 	return current;
 }
 
+extern void schedule_tail(struct task_struct *prev);
+
+/* This is called magically, by its address being stuffed in a jmp_buf
+ * and being longjmp-d to.
+ */
+void new_thread_handler(void)
+{
+	int (*fn)(void *), n;
+	void *arg;
+
+	if(current->thread.prev_sched != NULL)
+		schedule_tail(current->thread.prev_sched);
+	current->thread.prev_sched = NULL;
+
+	fn = current->thread.request.u.thread.proc;
+	arg = current->thread.request.u.thread.arg;
+
+	/* The return value is 1 if the kernel thread execs a process,
+	 * 0 if it just exits
+	 */
+	n = run_kernel_thread(fn, arg, &current->thread.exec_buf);
+	if(n == 1){
+		/* Handle any immediate reschedules or signals */
+		interrupt_end();
+		userspace(&current->thread.regs.regs);
+	}
+	else do_exit(0);
+}
+
+/* Called magically, see new_thread_handler above */
+void fork_handler(void)
+{
+	force_flush_all();
+	if(current->thread.prev_sched == NULL)
+		panic("blech");
+
+	schedule_tail(current->thread.prev_sched);
+
+	/* XXX: if interrupt_end() calls schedule, this call to
+	 * arch_switch_to isn't needed. We could want to apply this to
+	 * improve performance. -bb */
+	arch_switch_to(current->thread.prev_sched, current);
+
+	current->thread.prev_sched = NULL;
+
+	/* Handle any immediate reschedules or signals */
+	interrupt_end();
+
+	userspace(&current->thread.regs.regs);
+}
+
 int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
 		unsigned long stack_top, struct task_struct * p,
 		struct pt_regs *regs)
 {
-	int ret;
+	void (*handler)(void);
+	int ret = 0;
 
 	p->thread = (struct thread_struct) INIT_THREAD;
-	ret = copy_thread_skas(nr, clone_flags, sp, stack_top, p, regs);
 
-	if (ret || !current->thread.forking)
-		goto out;
+	if(current->thread.forking){
+		memcpy(&p->thread.regs.regs, &regs->regs,
+		       sizeof(p->thread.regs.regs));
+		REGS_SET_SYSCALL_RETURN(p->thread.regs.regs.regs, 0);
+		if(sp != 0)
+			REGS_SP(p->thread.regs.regs.regs) = sp;
 
-	clear_flushed_tls(p);
+		handler = fork_handler;
 
-	/*
-	 * Set a new TLS for the child thread?
-	 */
-	if (clone_flags & CLONE_SETTLS)
-		ret = arch_copy_tls(p);
+		arch_copy_thread(&current->thread.arch, &p->thread.arch);
+	}
+	else {
+		init_thread_registers(&p->thread.regs.regs);
+		p->thread.request.u.thread = current->thread.request.u.thread;
+		handler = new_thread_handler;
+	}
+
+	new_thread(task_stack_page(p), &p->thread.switch_buf, handler);
+
+	if (current->thread.forking) {
+		clear_flushed_tls(p);
+
+		/*
+		 * Set a new TLS for the child thread?
+		 */
+		if (clone_flags & CLONE_SETTLS)
+			ret = arch_copy_tls(p);
+	}
 
-out:
 	return ret;
 }
 
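
new_thread_handler() above leans on a contract with run_kernel_thread() that its comment only hints at: the thread function runs under a setjmp on exec_buf, and the call reports 0 if the function returned normally (the thread just exits) or 1 if the function longjmped out (it is execing a process, so the handler falls through to userspace()). A hedged, self-contained userspace sketch of that contract follows; the *_sketch names are hypothetical, not UML's:

#include <setjmp.h>
#include <stdio.h>

/* Sketch of the contract, not UML's real run_kernel_thread(): run fn
 * under a setjmp; a normal return yields 0 ("just exits"), a longjmp
 * on the saved buffer yields 1 ("execs a process"). */
static int run_kernel_thread_sketch(int (*fn)(void *), void *arg,
				    jmp_buf *exec_buf)
{
	if (setjmp(*exec_buf) != 0)
		return 1;	/* fn escaped via longjmp: it exec'd */
	fn(arg);
	return 0;		/* fn returned: the thread just exits */
}

static jmp_buf exec_buf;	/* stands in for current->thread.exec_buf */

/* A "kernel thread" that decides to exec and escapes via longjmp. */
static int execing_thread(void *arg)
{
	printf("thread: pretending to exec %s\n", (char *)arg);
	longjmp(exec_buf, 1);
}

int main(void)
{
	if (run_kernel_thread_sketch(execing_thread, "init", &exec_buf) == 1)
		printf("handler: would call interrupt_end() and userspace()\n");
	return 0;
}
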
@@ -198,7 +275,8 @@ void default_idle(void)
 
 void cpu_idle(void)
 {
-	init_idle_skas();
+	cpu_tasks[current_thread->cpu].pid = os_getpid();
+	default_idle();
 }
 
 void *um_virt_to_phys(struct task_struct *task, unsigned long addr,