author     Jeremy Fitzhardinge <jeremy@goop.org>    2009-02-27 16:25:28 -0500
committer  Ingo Molnar <mingo@elte.hu>              2009-03-02 06:07:48 -0500
commit     389d1fb11e5f2a16b5e34c547756f0c4dec641f7 (patch)
tree       83962a579c85d43356bfc67302d83594d6163034 /arch/x86
parent     db949bba3c7cf2e664ac12e237c6d4c914f0c69d (diff)
x86: unify chunks of kernel/process*.c
With x86-32 and -64 using the same mechanism for managing the
tss io permissions bitmap, large chunks of process*.c are
trivially unifiable, including:
- exit_thread
- flush_thread
- __switch_to_xtra (along with tsc enable/disable)
and as bonus pickups:
- sys_fork
- sys_vfork
(Note: asmlinkage expands to empty on x86-64)
Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
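
For context on the io-permissions-bitmap mechanism being unified here: it is the kernel side of the ioperm() syscall. The first ioperm() call allocates thread.io_bitmap_ptr and sets TIF_IO_BITMAP, __switch_to_xtra() copies the bitmap into the per-CPU TSS on context switch, and exit_thread() frees it and re-poisons the TSS slot with 0xff (all ports denied). A minimal userspace sketch that exercises this path (illustrative only, not part of this patch; x86-only and requires CAP_SYS_RAWIO):

/* Illustrative only -- not part of this patch.  ioperm() is what
 * allocates thread.io_bitmap_ptr and sets TIF_IO_BITMAP on the
 * kernel side.
 */
#include <stdio.h>
#include <sys/io.h>

int main(void)
{
        /* Ask for access to I/O port 0x80 (the POST/delay port). */
        if (ioperm(0x80, 1, 1) != 0) {
                perror("ioperm");       /* usually EPERM without CAP_SYS_RAWIO */
                return 1;
        }

        outb(0, 0x80);                  /* harmless write to the delay port */

        ioperm(0x80, 1, 0);             /* drop the permission again */
        return 0;
}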
Diffstat (limited to 'arch/x86')
 -rw-r--r--  arch/x86/include/asm/system.h |   2
 -rw-r--r--  arch/x86/kernel/process.c     | 191
 -rw-r--r--  arch/x86/kernel/process_32.c  | 172
 -rw-r--r--  arch/x86/kernel/process_64.c  | 188
 4 files changed, 192 insertions(+), 361 deletions(-)
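
Likewise, the TSC enable/disable helpers that move into process.c (get_tsc_mode(), set_tsc_mode(), hard_enable_TSC(), hard_disable_TSC()) back the PR_GET_TSC/PR_SET_TSC prctl interface, with __switch_to_xtra() keeping TIF_NOTSC in sync with CR4.TSD across context switches. An illustrative userspace sketch of that interface (not part of this patch):

/* Illustrative only -- not part of this patch.  Exercises the
 * PR_GET_TSC/PR_SET_TSC interface implemented by get_tsc_mode() and
 * set_tsc_mode(); after PR_TSC_SIGSEGV, executing RDTSC in this task
 * raises SIGSEGV.
 */
#include <stdio.h>
#include <sys/prctl.h>

int main(void)
{
        int mode = 0;

        if (prctl(PR_GET_TSC, &mode) == 0)
                printf("TSC mode: %s\n",
                       mode == PR_TSC_ENABLE ? "enabled" : "sigsegv");

        /* Sets TIF_NOTSC; __switch_to_xtra() mirrors it into CR4.TSD. */
        if (prctl(PR_SET_TSC, PR_TSC_SIGSEGV) != 0)
                perror("PR_SET_TSC");

        return 0;
}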
diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h
index c00bfdbdd456..1a7bf39f72dc 100644
--- a/arch/x86/include/asm/system.h
+++ b/arch/x86/include/asm/system.h
@@ -20,6 +20,8 @@
 struct task_struct; /* one of the stranger aspects of C forward declarations */
 struct task_struct *__switch_to(struct task_struct *prev,
                                 struct task_struct *next);
+void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
+                      struct tss_struct *tss);
 
 #ifdef CONFIG_X86_32
 
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 87b69d4fac16..6afa5232dbb7 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -1,8 +1,8 @@
 #include <linux/errno.h>
 #include <linux/kernel.h>
 #include <linux/mm.h>
-#include <asm/idle.h>
 #include <linux/smp.h>
+#include <linux/prctl.h>
 #include <linux/slab.h>
 #include <linux/sched.h>
 #include <linux/module.h>
@@ -11,6 +11,9 @@
 #include <linux/ftrace.h>
 #include <asm/system.h>
 #include <asm/apic.h>
+#include <asm/idle.h>
+#include <asm/uaccess.h>
+#include <asm/i387.h>
 
 unsigned long idle_halt;
 EXPORT_SYMBOL(idle_halt);
@@ -56,6 +59,192 @@ void arch_task_cache_init(void)
 }
 
 /*
+ * Free current thread data structures etc..
+ */
+void exit_thread(void)
+{
+        struct task_struct *me = current;
+        struct thread_struct *t = &me->thread;
+
+        if (me->thread.io_bitmap_ptr) {
+                struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
+
+                kfree(t->io_bitmap_ptr);
+                t->io_bitmap_ptr = NULL;
+                clear_thread_flag(TIF_IO_BITMAP);
+                /*
+                 * Careful, clear this in the TSS too:
+                 */
+                memset(tss->io_bitmap, 0xff, t->io_bitmap_max);
+                t->io_bitmap_max = 0;
+                put_cpu();
+        }
+
+        ds_exit_thread(current);
+}
+
+void flush_thread(void)
+{
+        struct task_struct *tsk = current;
+
+#ifdef CONFIG_X86_64
+        if (test_tsk_thread_flag(tsk, TIF_ABI_PENDING)) {
+                clear_tsk_thread_flag(tsk, TIF_ABI_PENDING);
+                if (test_tsk_thread_flag(tsk, TIF_IA32)) {
+                        clear_tsk_thread_flag(tsk, TIF_IA32);
+                } else {
+                        set_tsk_thread_flag(tsk, TIF_IA32);
+                        current_thread_info()->status |= TS_COMPAT;
+                }
+        }
+#endif
+
+        clear_tsk_thread_flag(tsk, TIF_DEBUG);
+
+        tsk->thread.debugreg0 = 0;
+        tsk->thread.debugreg1 = 0;
+        tsk->thread.debugreg2 = 0;
+        tsk->thread.debugreg3 = 0;
+        tsk->thread.debugreg6 = 0;
+        tsk->thread.debugreg7 = 0;
+        memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
+        /*
+         * Forget coprocessor state..
+         */
+        tsk->fpu_counter = 0;
+        clear_fpu(tsk);
+        clear_used_math();
+}
+
+static void hard_disable_TSC(void)
+{
+        write_cr4(read_cr4() | X86_CR4_TSD);
+}
+
+void disable_TSC(void)
+{
+        preempt_disable();
+        if (!test_and_set_thread_flag(TIF_NOTSC))
+                /*
+                 * Must flip the CPU state synchronously with
+                 * TIF_NOTSC in the current running context.
+                 */
+                hard_disable_TSC();
+        preempt_enable();
+}
+
+static void hard_enable_TSC(void)
+{
+        write_cr4(read_cr4() & ~X86_CR4_TSD);
+}
+
+static void enable_TSC(void)
+{
+        preempt_disable();
+        if (test_and_clear_thread_flag(TIF_NOTSC))
+                /*
+                 * Must flip the CPU state synchronously with
+                 * TIF_NOTSC in the current running context.
+                 */
+                hard_enable_TSC();
+        preempt_enable();
+}
+
+int get_tsc_mode(unsigned long adr)
+{
+        unsigned int val;
+
+        if (test_thread_flag(TIF_NOTSC))
+                val = PR_TSC_SIGSEGV;
+        else
+                val = PR_TSC_ENABLE;
+
+        return put_user(val, (unsigned int __user *)adr);
+}
+
+int set_tsc_mode(unsigned int val)
+{
+        if (val == PR_TSC_SIGSEGV)
+                disable_TSC();
+        else if (val == PR_TSC_ENABLE)
+                enable_TSC();
+        else
+                return -EINVAL;
+
+        return 0;
+}
+
+void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
+                      struct tss_struct *tss)
+{
+        struct thread_struct *prev, *next;
+
+        prev = &prev_p->thread;
+        next = &next_p->thread;
+
+        if (test_tsk_thread_flag(next_p, TIF_DS_AREA_MSR) ||
+            test_tsk_thread_flag(prev_p, TIF_DS_AREA_MSR))
+                ds_switch_to(prev_p, next_p);
+        else if (next->debugctlmsr != prev->debugctlmsr)
+                update_debugctlmsr(next->debugctlmsr);
+
+        if (test_tsk_thread_flag(next_p, TIF_DEBUG)) {
+                set_debugreg(next->debugreg0, 0);
+                set_debugreg(next->debugreg1, 1);
+                set_debugreg(next->debugreg2, 2);
+                set_debugreg(next->debugreg3, 3);
+                /* no 4 and 5 */
+                set_debugreg(next->debugreg6, 6);
+                set_debugreg(next->debugreg7, 7);
+        }
+
+        if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^
+            test_tsk_thread_flag(next_p, TIF_NOTSC)) {
+                /* prev and next are different */
+                if (test_tsk_thread_flag(next_p, TIF_NOTSC))
+                        hard_disable_TSC();
+                else
+                        hard_enable_TSC();
+        }
+
+        if (test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
+                /*
+                 * Copy the relevant range of the IO bitmap.
+                 * Normally this is 128 bytes or less:
+                 */
+                memcpy(tss->io_bitmap, next->io_bitmap_ptr,
+                       max(prev->io_bitmap_max, next->io_bitmap_max));
+        } else if (test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)) {
+                /*
+                 * Clear any possible leftover bits:
+                 */
+                memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
+        }
+}
+
+int sys_fork(struct pt_regs *regs)
+{
+        return do_fork(SIGCHLD, regs->sp, regs, 0, NULL, NULL);
+}
+
+/*
+ * This is trivial, and on the face of it looks like it
+ * could equally well be done in user mode.
+ *
+ * Not so, for quite unobvious reasons - register pressure.
+ * In user mode vfork() cannot have a stack frame, and if
+ * done by calling the "clone()" system call directly, you
+ * do not have enough call-clobbered registers to hold all
+ * the information you need.
+ */
+int sys_vfork(struct pt_regs *regs)
+{
+        return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->sp, regs, 0,
+                       NULL, NULL);
+}
+
+
+/*
  * Idle related variables and functions
  */
 unsigned long boot_option_idle_override = 0;
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index a59314e877f0..14014d766cad 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -230,52 +230,6 @@ int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
 }
 EXPORT_SYMBOL(kernel_thread);
 
-/*
- * Free current thread data structures etc..
- */
-void exit_thread(void)
-{
-        /* The process may have allocated an io port bitmap... nuke it. */
-        if (unlikely(test_thread_flag(TIF_IO_BITMAP))) {
-                struct task_struct *tsk = current;
-                struct thread_struct *t = &tsk->thread;
-                int cpu = get_cpu();
-                struct tss_struct *tss = &per_cpu(init_tss, cpu);
-
-                kfree(t->io_bitmap_ptr);
-                t->io_bitmap_ptr = NULL;
-                clear_thread_flag(TIF_IO_BITMAP);
-                /*
-                 * Careful, clear this in the TSS too:
-                 */
-                memset(tss->io_bitmap, 0xff, t->io_bitmap_max);
-                t->io_bitmap_max = 0;
-                put_cpu();
-        }
-
-        ds_exit_thread(current);
-}
-
-void flush_thread(void)
-{
-        struct task_struct *tsk = current;
-
-        tsk->thread.debugreg0 = 0;
-        tsk->thread.debugreg1 = 0;
-        tsk->thread.debugreg2 = 0;
-        tsk->thread.debugreg3 = 0;
-        tsk->thread.debugreg6 = 0;
-        tsk->thread.debugreg7 = 0;
-        memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
-        clear_tsk_thread_flag(tsk, TIF_DEBUG);
-        /*
-         * Forget coprocessor state..
-         */
-        tsk->fpu_counter = 0;
-        clear_fpu(tsk);
-        clear_used_math();
-}
-
 void release_thread(struct task_struct *dead_task)
 {
         BUG_ON(dead_task->mm);
@@ -363,112 +317,6 @@ start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
 }
 EXPORT_SYMBOL_GPL(start_thread);
 
-static void hard_disable_TSC(void)
-{
-        write_cr4(read_cr4() | X86_CR4_TSD);
-}
-
-void disable_TSC(void)
-{
-        preempt_disable();
-        if (!test_and_set_thread_flag(TIF_NOTSC))
-                /*
-                 * Must flip the CPU state synchronously with
-                 * TIF_NOTSC in the current running context.
-                 */
-                hard_disable_TSC();
-        preempt_enable();
-}
-
-static void hard_enable_TSC(void)
-{
-        write_cr4(read_cr4() & ~X86_CR4_TSD);
-}
-
-static void enable_TSC(void)
-{
-        preempt_disable();
-        if (test_and_clear_thread_flag(TIF_NOTSC))
-                /*
-                 * Must flip the CPU state synchronously with
-                 * TIF_NOTSC in the current running context.
-                 */
-                hard_enable_TSC();
-        preempt_enable();
-}
-
-int get_tsc_mode(unsigned long adr)
-{
-        unsigned int val;
-
-        if (test_thread_flag(TIF_NOTSC))
-                val = PR_TSC_SIGSEGV;
-        else
-                val = PR_TSC_ENABLE;
-
-        return put_user(val, (unsigned int __user *)adr);
-}
-
-int set_tsc_mode(unsigned int val)
-{
-        if (val == PR_TSC_SIGSEGV)
-                disable_TSC();
-        else if (val == PR_TSC_ENABLE)
-                enable_TSC();
-        else
-                return -EINVAL;
-
-        return 0;
-}
-
-static noinline void
-__switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
-                 struct tss_struct *tss)
-{
-        struct thread_struct *prev, *next;
-
-        prev = &prev_p->thread;
-        next = &next_p->thread;
-
-        if (test_tsk_thread_flag(next_p, TIF_DS_AREA_MSR) ||
-            test_tsk_thread_flag(prev_p, TIF_DS_AREA_MSR))
-                ds_switch_to(prev_p, next_p);
-        else if (next->debugctlmsr != prev->debugctlmsr)
-                update_debugctlmsr(next->debugctlmsr);
-
-        if (test_tsk_thread_flag(next_p, TIF_DEBUG)) {
-                set_debugreg(next->debugreg0, 0);
-                set_debugreg(next->debugreg1, 1);
-                set_debugreg(next->debugreg2, 2);
-                set_debugreg(next->debugreg3, 3);
-                /* no 4 and 5 */
-                set_debugreg(next->debugreg6, 6);
-                set_debugreg(next->debugreg7, 7);
-        }
-
-        if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^
-            test_tsk_thread_flag(next_p, TIF_NOTSC)) {
-                /* prev and next are different */
-                if (test_tsk_thread_flag(next_p, TIF_NOTSC))
-                        hard_disable_TSC();
-                else
-                        hard_enable_TSC();
-        }
-
-        if (test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
-                /*
-                 * Copy the relevant range of the IO bitmap.
-                 * Normally this is 128 bytes or less:
-                 */
-                memcpy(tss->io_bitmap, next->io_bitmap_ptr,
-                       max(prev->io_bitmap_max, next->io_bitmap_max));
-        } else if (test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)) {
-                /*
-                 * Clear any possible leftover bits:
-                 */
-                memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
-        }
-}
 
 /*
  * switch_to(x,yn) should switch tasks from x to y.
@@ -582,11 +430,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
         return prev_p;
 }
 
-int sys_fork(struct pt_regs *regs)
-{
-        return do_fork(SIGCHLD, regs->sp, regs, 0, NULL, NULL);
-}
-
 int sys_clone(struct pt_regs *regs)
 {
         unsigned long clone_flags;
@@ -603,21 +446,6 @@ int sys_clone(struct pt_regs *regs)
 }
 
 /*
- * This is trivial, and on the face of it looks like it
- * could equally well be done in user mode.
- *
- * Not so, for quite unobvious reasons - register pressure.
- * In user mode vfork() cannot have a stack frame, and if
- * done by calling the "clone()" system call directly, you
- * do not have enough call-clobbered registers to hold all
- * the information you need.
- */
-int sys_vfork(struct pt_regs *regs)
-{
-        return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->sp, regs, 0, NULL, NULL);
-}
-
-/*
  * sys_execve() executes a new program.
  */
 int sys_execve(struct pt_regs *regs)
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 836ef6575f01..abb7e6a7f0c6 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -237,61 +237,6 @@ void show_regs(struct pt_regs *regs)
         show_trace(NULL, regs, (void *)(regs + 1), regs->bp);
 }
 
-/*
- * Free current thread data structures etc..
- */
-void exit_thread(void)
-{
-        struct task_struct *me = current;
-        struct thread_struct *t = &me->thread;
-
-        if (me->thread.io_bitmap_ptr) {
-                struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
-
-                kfree(t->io_bitmap_ptr);
-                t->io_bitmap_ptr = NULL;
-                clear_thread_flag(TIF_IO_BITMAP);
-                /*
-                 * Careful, clear this in the TSS too:
-                 */
-                memset(tss->io_bitmap, 0xff, t->io_bitmap_max);
-                t->io_bitmap_max = 0;
-                put_cpu();
-        }
-
-        ds_exit_thread(current);
-}
-
-void flush_thread(void)
-{
-        struct task_struct *tsk = current;
-
-        if (test_tsk_thread_flag(tsk, TIF_ABI_PENDING)) {
-                clear_tsk_thread_flag(tsk, TIF_ABI_PENDING);
-                if (test_tsk_thread_flag(tsk, TIF_IA32)) {
-                        clear_tsk_thread_flag(tsk, TIF_IA32);
-                } else {
-                        set_tsk_thread_flag(tsk, TIF_IA32);
-                        current_thread_info()->status |= TS_COMPAT;
-                }
-        }
-        clear_tsk_thread_flag(tsk, TIF_DEBUG);
-
-        tsk->thread.debugreg0 = 0;
-        tsk->thread.debugreg1 = 0;
-        tsk->thread.debugreg2 = 0;
-        tsk->thread.debugreg3 = 0;
-        tsk->thread.debugreg6 = 0;
-        tsk->thread.debugreg7 = 0;
-        memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
-        /*
-         * Forget coprocessor state..
-         */
-        tsk->fpu_counter = 0;
-        clear_fpu(tsk);
-        clear_used_math();
-}
-
 void release_thread(struct task_struct *dead_task)
 {
         if (dead_task->mm) {
@@ -425,118 +370,6 @@ start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
 }
 EXPORT_SYMBOL_GPL(start_thread);
 
-static void hard_disable_TSC(void)
-{
-        write_cr4(read_cr4() | X86_CR4_TSD);
-}
-
-void disable_TSC(void)
-{
-        preempt_disable();
-        if (!test_and_set_thread_flag(TIF_NOTSC))
-                /*
-                 * Must flip the CPU state synchronously with
-                 * TIF_NOTSC in the current running context.
-                 */
-                hard_disable_TSC();
-        preempt_enable();
-}
-
-static void hard_enable_TSC(void)
-{
-        write_cr4(read_cr4() & ~X86_CR4_TSD);
-}
-
-static void enable_TSC(void)
-{
-        preempt_disable();
-        if (test_and_clear_thread_flag(TIF_NOTSC))
-                /*
-                 * Must flip the CPU state synchronously with
-                 * TIF_NOTSC in the current running context.
-                 */
-                hard_enable_TSC();
-        preempt_enable();
-}
-
-int get_tsc_mode(unsigned long adr)
-{
-        unsigned int val;
-
-        if (test_thread_flag(TIF_NOTSC))
-                val = PR_TSC_SIGSEGV;
-        else
-                val = PR_TSC_ENABLE;
-
-        return put_user(val, (unsigned int __user *)adr);
-}
-
-int set_tsc_mode(unsigned int val)
-{
-        if (val == PR_TSC_SIGSEGV)
-                disable_TSC();
-        else if (val == PR_TSC_ENABLE)
-                enable_TSC();
-        else
-                return -EINVAL;
-
-        return 0;
-}
-
-/*
- * This special macro can be used to load a debugging register
- */
-#define loaddebug(thread, r) set_debugreg(thread->debugreg ## r, r)
-
-static inline void __switch_to_xtra(struct task_struct *prev_p,
-                                    struct task_struct *next_p,
-                                    struct tss_struct *tss)
-{
-        struct thread_struct *prev, *next;
-
-        prev = &prev_p->thread,
-        next = &next_p->thread;
-
-        if (test_tsk_thread_flag(next_p, TIF_DS_AREA_MSR) ||
-            test_tsk_thread_flag(prev_p, TIF_DS_AREA_MSR))
-                ds_switch_to(prev_p, next_p);
-        else if (next->debugctlmsr != prev->debugctlmsr)
-                update_debugctlmsr(next->debugctlmsr);
-
-        if (test_tsk_thread_flag(next_p, TIF_DEBUG)) {
-                loaddebug(next, 0);
-                loaddebug(next, 1);
-                loaddebug(next, 2);
-                loaddebug(next, 3);
-                /* no 4 and 5 */
-                loaddebug(next, 6);
-                loaddebug(next, 7);
-        }
-
-        if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^
-            test_tsk_thread_flag(next_p, TIF_NOTSC)) {
-                /* prev and next are different */
-                if (test_tsk_thread_flag(next_p, TIF_NOTSC))
-                        hard_disable_TSC();
-                else
-                        hard_enable_TSC();
-        }
-
-        if (test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
-                /*
-                 * Copy the relevant range of the IO bitmap.
-                 * Normally this is 128 bytes or less:
-                 */
-                memcpy(tss->io_bitmap, next->io_bitmap_ptr,
-                       max(prev->io_bitmap_max, next->io_bitmap_max));
-        } else if (test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)) {
-                /*
-                 * Clear any possible leftover bits:
-                 */
-                memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
-        }
-}
-
 /*
  * switch_to(x,y) should switch tasks from x to y.
  *
@@ -694,11 +527,6 @@ void set_personality_64bit(void)
         current->personality &= ~READ_IMPLIES_EXEC;
 }
 
-asmlinkage long sys_fork(struct pt_regs *regs)
-{
-        return do_fork(SIGCHLD, regs->sp, regs, 0, NULL, NULL);
-}
-
 asmlinkage long
 sys_clone(unsigned long clone_flags, unsigned long newsp,
           void __user *parent_tid, void __user *child_tid, struct pt_regs *regs)
@@ -708,22 +536,6 @@ sys_clone(unsigned long clone_flags, unsigned long newsp,
         return do_fork(clone_flags, newsp, regs, 0, parent_tid, child_tid);
 }
 
-/*
- * This is trivial, and on the face of it looks like it
- * could equally well be done in user mode.
- *
- * Not so, for quite unobvious reasons - register pressure.
- * In user mode vfork() cannot have a stack frame, and if
- * done by calling the "clone()" system call directly, you
- * do not have enough call-clobbered registers to hold all
- * the information you need.
- */
-asmlinkage long sys_vfork(struct pt_regs *regs)
-{
-        return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->sp, regs, 0,
-                       NULL, NULL);
-}
-
 unsigned long get_wchan(struct task_struct *p)
 {
         unsigned long stack;