Diffstat (limited to 'arch/i386/kernel/process.c')
-rw-r--r--  arch/i386/kernel/process.c  26
1 file changed, 12 insertions(+), 14 deletions(-)
diff --git a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c
index ae924c416b68..905364d42847 100644
--- a/arch/i386/kernel/process.c
+++ b/arch/i386/kernel/process.c
@@ -56,6 +56,7 @@
 
 #include <asm/tlbflush.h>
 #include <asm/cpu.h>
+#include <asm/pda.h>
 
 asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
 
@@ -346,6 +347,7 @@ int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
 
 	regs.xds = __USER_DS;
 	regs.xes = __USER_DS;
+	regs.xgs = __KERNEL_PDA;
 	regs.orig_eax = -1;
 	regs.eip = (unsigned long) kernel_thread_helper;
 	regs.xcs = __KERNEL_CS | get_kernel_rpl();
@@ -431,7 +433,6 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long esp,
 	p->thread.eip = (unsigned long) ret_from_fork;
 
 	savesegment(fs,p->thread.fs);
-	savesegment(gs,p->thread.gs);
 
 	tsk = current;
 	if (unlikely(test_tsk_thread_flag(tsk, TIF_IO_BITMAP))) {
@@ -659,16 +660,16 @@ struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct tas
 	load_esp0(tss, next);
 
 	/*
-	 * Save away %fs and %gs. No need to save %es and %ds, as
-	 * those are always kernel segments while inside the kernel.
-	 * Doing this before setting the new TLS descriptors avoids
-	 * the situation where we temporarily have non-reloadable
-	 * segments in %fs and %gs. This could be an issue if the
-	 * NMI handler ever used %fs or %gs (it does not today), or
-	 * if the kernel is running inside of a hypervisor layer.
+	 * Save away %fs. No need to save %gs, as it was saved on the
+	 * stack on entry. No need to save %es and %ds, as those are
+	 * always kernel segments while inside the kernel. Doing this
+	 * before setting the new TLS descriptors avoids the situation
+	 * where we temporarily have non-reloadable segments in %fs
+	 * and %gs. This could be an issue if the NMI handler ever
+	 * used %fs or %gs (it does not today), or if the kernel is
+	 * running inside of a hypervisor layer.
 	 */
 	savesegment(fs, prev->fs);
-	savesegment(gs, prev->gs);
 
 	/*
 	 * Load the per-thread Thread-Local Storage descriptor.
@@ -676,16 +677,13 @@ struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct tas
 	load_TLS(next, cpu);
 
 	/*
-	 * Restore %fs and %gs if needed.
+	 * Restore %fs if needed.
 	 *
-	 * Glibc normally makes %fs be zero, and %gs is one of
-	 * the TLS segments.
+	 * Glibc normally makes %fs be zero.
 	 */
 	if (unlikely(prev->fs | next->fs))
 		loadsegment(fs, next->fs);
 
-	if (prev->gs | next->gs)
-		loadsegment(gs, next->gs);
 
 	/*
 	 * Restore IOPL if needed.
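
For reference, savesegment() and loadsegment() are short inline-asm helpers from include/asm-i386/system.h. A minimal sketch of their shape (simplified and paraphrased here, not the exact kernel source):

/* Read a segment register into a variable. This compiles to a plain
 * mov from the register, which can never fault, so saving %fs before
 * load_TLS() rewrites the TLS descriptors is always safe. */
#define savesegment(seg, value) \
	asm volatile("mov %%" #seg ",%0" : "=rm" (value))

/* Write a segment register. Loading a selector whose descriptor has
 * become invalid faults, so the real kernel macro wraps this mov in an
 * exception-table fixup that falls back to loading the null selector. */
#define loadsegment(seg, value) \
	asm volatile("mov %0,%%" #seg : : "rm" (value))

That asymmetry is what the rewritten comment in __switch_to() is about: a segment save is always safe, a segment load is not, so %fs is saved before the new TLS descriptors go in. %gs drops out of this path entirely because, per the new comment, it is saved and restored on the kernel stack at entry/exit, and kernel-mode code keeps it loaded with the per-CPU PDA selector (__KERNEL_PDA) that kernel_thread() now places in regs.xgs.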