| author | Paolo 'Blaisorblade' Giarrusso <blaisorblade@yahoo.it> | 2006-03-31 05:30:24 -0500 |
|---|---|---|
| committer | Linus Torvalds <torvalds@g5.osdl.org> | 2006-03-31 15:18:52 -0500 |
| commit | 54d8d3b5a0ce1cdbad1d3154c9ea9732d394e9c7 (patch) | |
| tree | 129a29ab92fba7dc99229c87a38fe8df3ade7b15 | |
| parent | dd77aec07aec5cb81aed3b4ef79c1ff8bd0e2a68 (diff) | |
[PATCH] uml: add arch_switch_to for newly forked thread
Newly forked threads have no arch_switch_to_skas() called before their first
run, because when schedule() switches to them they're resumed in the body of
thread_wait() inside fork_handler() rather than in switch_threads() in
switch_to_skas(). Compensate for this missing call.
Signed-off-by: Paolo 'Blaisorblade' Giarrusso <blaisorblade@yahoo.it>
Acked-by: Jeff Dike <jdike@addtoit.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
| -rw-r--r-- | arch/um/kernel/skas/process_kern.c | 7 |
| -rw-r--r-- | arch/um/sys-i386/ptrace.c | 9 |
| -rw-r--r-- | arch/um/sys-i386/tls.c | 13 |
3 files changed, 25 insertions(+), 4 deletions(-)
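The commit message above hinges on which code path a thread wakes up on after a context switch: the per-arch switch hook runs on the normal switch-return path, but a freshly forked thread first wakes up in its fork handler and so missed it. A rough, self-contained sketch of that situation (userspace stand-ins with hypothetical names, not the actual UML sources):

```c
#include <stdio.h>

struct task { const char *name; };

/* Stand-in for arch_switch_to_skas(): reload per-arch state such as TLS. */
static void arch_switch_hook(struct task *from, struct task *to)
{
	printf("arch switch hook: %s -> %s\n",
	       from ? from->name : "(none)", to->name);
}

/* Path taken when schedule() resumes a thread that has already run:
 * it comes back inside switch_to_skas(), where the hook is invoked. */
static void resume_existing(struct task *prev, struct task *next)
{
	arch_switch_hook(prev, next);
}

/* First wake-up of a newly forked thread: it resumes in fork_handler()
 * instead, so before this patch the hook was never called for it. */
static void resume_forked(struct task *prev, struct task *next)
{
	arch_switch_hook(prev, next);	/* the call this patch adds */
}

int main(void)
{
	struct task old = { "old thread" }, fresh = { "forked thread" };
	resume_existing(&old, &fresh);	/* normal reschedule */
	resume_forked(&old, &fresh);	/* first run after fork */
	return 0;
}
```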
diff --git a/arch/um/kernel/skas/process_kern.c b/arch/um/kernel/skas/process_kern.c
index 38b185370c42..2135eaf98a93 100644
--- a/arch/um/kernel/skas/process_kern.c
+++ b/arch/um/kernel/skas/process_kern.c
@@ -91,10 +91,17 @@ void fork_handler(int sig)
 		panic("blech");
 
 	schedule_tail(current->thread.prev_sched);
+
+	/* XXX: if interrupt_end() calls schedule, this call to
+	 * arch_switch_to_skas isn't needed. We could want to apply this to
+	 * improve performance. -bb */
+	arch_switch_to_skas(current->thread.prev_sched, current);
+
 	current->thread.prev_sched = NULL;
 
 	/* Handle any immediate reschedules or signals */
 	interrupt_end();
+
 	userspace(&current->thread.regs.regs);
 }
 
diff --git a/arch/um/sys-i386/ptrace.c b/arch/um/sys-i386/ptrace.c
index 6a23cc6947c3..6028bc7cc01b 100644
--- a/arch/um/sys-i386/ptrace.c
+++ b/arch/um/sys-i386/ptrace.c
@@ -23,7 +23,14 @@ void arch_switch_to_tt(struct task_struct *from, struct task_struct *to)
 
 void arch_switch_to_skas(struct task_struct *from, struct task_struct *to)
 {
-	arch_switch_tls_skas(from, to);
+	int err = arch_switch_tls_skas(from, to);
+	if (!err)
+		return;
+
+	if (err != -EINVAL)
+		printk(KERN_WARNING "arch_switch_tls_skas failed, errno %d, not EINVAL\n", -err);
+	else
+		printk(KERN_WARNING "arch_switch_tls_skas failed, errno = EINVAL\n");
 }
 
 int is_syscall(unsigned long addr)
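The new arch_switch_to_skas() body distinguishes an expected -EINVAL return of arch_switch_tls_skas() from any other failure. A minimal sketch of that errno-classification pattern, with printf standing in for printk and a hypothetical helper name:

```c
#include <errno.h>
#include <stdio.h>

/* Classify a negative-errno return: stay silent on success, name EINVAL
 * explicitly, and report any other errno numerically. */
static void report_tls_switch(int err)
{
	if (!err)
		return;			/* common case: nothing to report */

	if (err != -EINVAL)
		printf("tls switch failed, errno %d, not EINVAL\n", -err);
	else
		printf("tls switch failed, errno = EINVAL\n");
}

int main(void)
{
	report_tls_switch(0);		/* silent */
	report_tls_switch(-EINVAL);	/* expected failure */
	report_tls_switch(-ENOSYS);	/* unexpected failure, prints the errno */
	return 0;
}
```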
diff --git a/arch/um/sys-i386/tls.c b/arch/um/sys-i386/tls.c
index e3c5bc593fae..2251654c6b45 100644
--- a/arch/um/sys-i386/tls.c
+++ b/arch/um/sys-i386/tls.c
@@ -70,8 +70,6 @@ static int get_free_idx(struct task_struct* task)
 	return -ESRCH;
 }
 
-#define O_FORCE 1
-
 static inline void clear_user_desc(struct user_desc* info)
 {
 	/* Postcondition: LDT_empty(info) returns true. */
@@ -84,6 +82,8 @@ static inline void clear_user_desc(struct user_desc* info)
 	info->seg_not_present = 1;
 }
 
+#define O_FORCE 1
+
 static int load_TLS(int flags, struct task_struct *to)
 {
 	int ret = 0;
@@ -162,7 +162,13 @@ void clear_flushed_tls(struct task_struct *task)
  * SKAS patch. */
 int arch_switch_tls_skas(struct task_struct *from, struct task_struct *to)
 {
-	return load_TLS(O_FORCE, to);
+	/* We have no need whatsoever to switch TLS for kernel threads; beyond
+	 * that, that would also result in us calling os_set_thread_area with
+	 * userspace_pid[cpu] == 0, which gives an error. */
+	if (likely(to->mm))
+		return load_TLS(O_FORCE, to);
+
+	return 0;
 }
 
 int arch_switch_tls_tt(struct task_struct *from, struct task_struct *to)
@@ -324,3 +330,4 @@ int ptrace_get_thread_area(struct task_struct *child, int idx,
 out:
 	return ret;
 }
+
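The tls.c hunk guards the TLS reload with a check on to->mm: kernel threads have no user address space, so there is no userspace TLS to reload and calling down to os_set_thread_area would only produce an error. A small self-contained sketch of the same guard, using hypothetical userspace stand-ins rather than the kernel types:

```c
#include <stdio.h>

struct mm { int dummy; };
struct task { const char *name; struct mm *mm; };

/* Stand-in for load_TLS(): pretend to reload the thread's TLS descriptors. */
static int load_tls_for(struct task *to)
{
	printf("loading TLS for %s\n", to->name);
	return 0;
}

/* Mirror of the patched arch_switch_tls_skas() logic: only user processes
 * (those with an mm) need the reload; kernel threads are skipped. */
static int switch_tls(struct task *to)
{
	if (to->mm)
		return load_tls_for(to);
	return 0;
}

int main(void)
{
	struct mm m;
	struct task user = { "user task", &m };
	struct task kthread = { "kernel thread", NULL };

	switch_tls(&user);	/* reloads TLS */
	switch_tls(&kthread);	/* no-op */
	return 0;
}
```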
