author	Jeremy Fitzhardinge <jeremy@goop.org>	2006-12-06 20:14:02 -0500
committer	Andi Kleen <andi@basil.nowhere.org>	2006-12-06 20:14:02 -0500
commit	f95d47caae5302a63d92be9a0292abc90e2a14e1 (patch)
tree	cfa963975d104c56aba28df6c941759175ed4b98
parent	62111195800d80c66cdc69063ea3145878c99fbf (diff)
[PATCH] i386: Use %gs as the PDA base-segment in the kernel
This patch is the meat of the PDA change.  It makes several related
changes:

1: Most significantly, %gs is now used in the kernel.  This means
   that on entry, the old value of %gs is saved away, and it is
   reloaded with __KERNEL_PDA.

2: entry.S constructs the stack in the shape of struct pt_regs, and
   this is passed around the kernel so that the process's saved
   register state can be accessed.

   Unfortunately struct pt_regs doesn't currently have space for %gs
   (or %fs).  This patch extends pt_regs to add space for gs (no
   space is allocated for %fs, since it won't be used, and it would
   just complicate the code in entry.S to work around the space).

3: Because %gs is now saved on the stack like %ds, %es and the
   integer registers, there are a number of places where it no
   longer needs to be handled specially; namely context switch, and
   saving/restoring the register state in a signal context.

4: And since kernel threads run in kernel space and call normal
   kernel code, they need to be created with their %gs ==
   __KERNEL_PDA.

Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
Signed-off-by: Andi Kleen <ak@suse.de>
Cc: Chuck Ebbert <76306.1226@compuserve.com>
Cc: Zachary Amsden <zach@vmware.com>
Cc: Jan Beulich <jbeulich@novell.com>
Cc: Andi Kleen <ak@suse.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
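[Editor's illustration, not part of the patch: once %gs is based at the
PDA, reading a PDA field compiles down to a single %gs-relative load.
A minimal C sketch of the idea, assuming a pda.h-style structure; the
cpu_number field and accessor name here are hypothetical:]

	#include <stddef.h>

	/* Sketch only: simplified stand-in for the real i386 pda.h. */
	struct i386_pda {
		struct i386_pda *_pda;	/* self-pointer, as in boot_pda below */
		int cpu_number;		/* hypothetical example field */
	};

	/* With %gs based at this CPU's PDA, reading a field is one
	   %gs-relative load, with no per-CPU array walk needed. */
	static inline int read_pda_cpu_number(void)
	{
		int ret;
		asm("movl %%gs:%c1, %0"
		    : "=r" (ret)
		    : "i" (offsetof(struct i386_pda, cpu_number)));
		return ret;
	}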
 arch/i386/kernel/asm-offsets.c |  1
 arch/i386/kernel/cpu/common.c  | 21
 arch/i386/kernel/entry.S       | 70
 arch/i386/kernel/head.S        | 31
 arch/i386/kernel/process.c     | 26
 arch/i386/kernel/signal.c      |  6
 include/asm-i386/mmu_context.h |  4
 include/asm-i386/processor.h   |  4
 include/asm-i386/ptrace.h      |  2
 kernel/fork.c                  |  2
 10 files changed, 117 insertions(+), 50 deletions(-)
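[Editor's note: the head.S hunk below adds setup_pda, which stitches
the PDA's address into the __KERNEL_PDA descriptor's base field.  A
segment descriptor's base bytes are non-contiguous, so as a hedged C
sketch of the same packing (a hypothetical helper mirroring the
%ax/%al/%ah stores in setup_pda, not kernel code):]

	#include <stdint.h>

	/* A 32-bit segment base is scattered across an 8-byte GDT
	   descriptor: bits 0-15 at bytes 2-3, bits 16-23 at byte 4,
	   and bits 24-31 at byte 7. */
	static void gdt_set_base(uint8_t desc[8], uint32_t base)
	{
		desc[2] = base & 0xff;		/* base bits 0-7   */
		desc[3] = (base >> 8) & 0xff;	/* base bits 8-15  */
		desc[4] = (base >> 16) & 0xff;	/* base bits 16-23 */
		desc[7] = (base >> 24) & 0xff;	/* base bits 24-31 */
	}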
diff --git a/arch/i386/kernel/asm-offsets.c b/arch/i386/kernel/asm-offsets.c
index 70b19807acf9..9620872d3534 100644
--- a/arch/i386/kernel/asm-offsets.c
+++ b/arch/i386/kernel/asm-offsets.c
@@ -72,6 +72,7 @@ void foo(void)
 	OFFSET(PT_EAX, pt_regs, eax);
 	OFFSET(PT_DS,  pt_regs, xds);
 	OFFSET(PT_ES,  pt_regs, xes);
+	OFFSET(PT_GS,  pt_regs, xgs);
 	OFFSET(PT_ORIG_EAX, pt_regs, orig_eax);
 	OFFSET(PT_EIP, pt_regs, eip);
 	OFFSET(PT_CS,  pt_regs, xcs);
diff --git a/arch/i386/kernel/cpu/common.c b/arch/i386/kernel/cpu/common.c
index 2534e25ed745..4e63d8ce602b 100644
--- a/arch/i386/kernel/cpu/common.c
+++ b/arch/i386/kernel/cpu/common.c
@@ -593,6 +593,14 @@ void __init early_cpu_init(void)
 #endif
 }
 
+/* Make sure %gs is initialized properly in idle threads */
+struct pt_regs * __devinit idle_regs(struct pt_regs *regs)
+{
+	memset(regs, 0, sizeof(struct pt_regs));
+	regs->xgs = __KERNEL_PDA;
+	return regs;
+}
+
 __cpuinit int alloc_gdt(int cpu)
 {
 	struct Xgt_desc_struct *cpu_gdt_descr = &per_cpu(cpu_gdt_descr, cpu);
@@ -644,6 +652,14 @@ struct i386_pda boot_pda = {
 	._pda = &boot_pda,
 };
 
+static inline void set_kernel_gs(void)
+{
+	/* Set %gs for this CPU's PDA.  Memory clobber is to create a
+	   barrier with respect to any PDA operations, so the compiler
+	   doesn't move any before here. */
+	asm volatile ("mov %0, %%gs" : : "r" (__KERNEL_PDA) : "memory");
+}
+
 /* Initialize the CPU's GDT and PDA.  The boot CPU does this for
    itself, but secondaries find this done for them. */
 __cpuinit int init_gdt(int cpu, struct task_struct *idle)
@@ -693,6 +709,7 @@ static void __cpuinit _cpu_init(int cpu, struct task_struct *curr)
 	   the boot CPU, this will transition from the boot gdt+pda to
 	   the real ones). */
 	load_gdt(cpu_gdt_descr);
+	set_kernel_gs();
 
 	if (cpu_test_and_set(cpu, cpu_initialized)) {
 		printk(KERN_WARNING "CPU#%d already initialized!\n", cpu);
@@ -731,8 +748,8 @@ static void __cpuinit _cpu_init(int cpu, struct task_struct *curr)
 	__set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss);
 #endif
 
-	/* Clear %fs and %gs. */
-	asm volatile ("movl %0, %%fs; movl %0, %%gs" : : "r" (0));
+	/* Clear %fs. */
+	asm volatile ("mov %0, %%fs" : : "r" (0));
 
 	/* Clear all 6 debug registers: */
 	set_debugreg(0, 0);
diff --git a/arch/i386/kernel/entry.S b/arch/i386/kernel/entry.S
index 0069bf01603e..b99d4a160078 100644
--- a/arch/i386/kernel/entry.S
+++ b/arch/i386/kernel/entry.S
@@ -30,12 +30,13 @@
  * 18(%esp) - %eax
  * 1C(%esp) - %ds
  * 20(%esp) - %es
- * 24(%esp) - orig_eax
- * 28(%esp) - %eip
- * 2C(%esp) - %cs
- * 30(%esp) - %eflags
- * 34(%esp) - %oldesp
- * 38(%esp) - %oldss
+ * 24(%esp) - %gs
+ * 28(%esp) - orig_eax
+ * 2C(%esp) - %eip
+ * 30(%esp) - %cs
+ * 34(%esp) - %eflags
+ * 38(%esp) - %oldesp
+ * 3C(%esp) - %oldss
  *
  * "current" is in register %ebx during any slow entries.
  */
@@ -92,6 +93,9 @@ VM_MASK = 0x00020000
 
 #define SAVE_ALL \
 	cld; \
+	pushl %gs; \
+	CFI_ADJUST_CFA_OFFSET 4;\
+	/*CFI_REL_OFFSET gs, 0;*/\
 	pushl %es; \
 	CFI_ADJUST_CFA_OFFSET 4;\
 	/*CFI_REL_OFFSET es, 0;*/\
@@ -121,7 +125,9 @@ VM_MASK = 0x00020000
 	CFI_REL_OFFSET ebx, 0;\
 	movl $(__USER_DS), %edx; \
 	movl %edx, %ds; \
-	movl %edx, %es;
+	movl %edx, %es; \
+	movl $(__KERNEL_PDA), %edx; \
+	movl %edx, %gs
 
 #define RESTORE_INT_REGS \
 	popl %ebx;	\
@@ -154,17 +160,22 @@ VM_MASK = 0x00020000
 2:	popl %es;	\
 	CFI_ADJUST_CFA_OFFSET -4;\
 	/*CFI_RESTORE es;*/\
-.section .fixup,"ax";	\
-3:	movl $0,(%esp);	\
-	jmp 1b;		\
+3:	popl %gs;	\
+	CFI_ADJUST_CFA_OFFSET -4;\
+	/*CFI_RESTORE gs;*/\
+.pushsection .fixup,"ax";	\
 4:	movl $0,(%esp);	\
+	jmp 1b;		\
+5:	movl $0,(%esp);	\
 	jmp 2b;		\
-.previous;		\
+6:	movl $0,(%esp);	\
+	jmp 3b;		\
 .section __ex_table,"a";\
 	.align 4;	\
-	.long 1b,3b;	\
-	.long 2b,4b;	\
-.previous
+	.long 1b,4b;	\
+	.long 2b,5b;	\
+	.long 3b,6b;	\
+.popsection
 
 #define RING0_INT_FRAME \
 	CFI_STARTPROC simple;\
@@ -231,6 +242,7 @@ check_userspace:
 	andl $(VM_MASK | SEGMENT_RPL_MASK), %eax
 	cmpl $USER_RPL, %eax
 	jb resume_kernel		# not returning to v8086 or userspace
+
 ENTRY(resume_userspace)
 	DISABLE_INTERRUPTS		# make sure we don't miss an interrupt
 					# setting need_resched or sigpending
@@ -327,9 +339,16 @@ sysenter_past_esp:
 	movl PT_OLDESP(%esp), %ecx
 	xorl %ebp,%ebp
 	TRACE_IRQS_ON
+1:	mov  PT_GS(%esp), %gs
 	ENABLE_INTERRUPTS_SYSEXIT
 	CFI_ENDPROC
-
+.pushsection .fixup,"ax"
+2:	movl $0,PT_GS(%esp)
+	jmp 1b
+.section __ex_table,"a"
+	.align 4
+	.long 1b,2b
+.popsection
 
 	# system call handler stub
 ENTRY(system_call)
@@ -375,7 +394,7 @@ restore_nocheck:
 	TRACE_IRQS_IRET
 restore_nocheck_notrace:
 	RESTORE_REGS
-	addl $4, %esp
+	addl $4, %esp			# skip orig_eax/error_code
 	CFI_ADJUST_CFA_OFFSET -4
 1:	INTERRUPT_RETURN
 .section .fixup,"ax"
@@ -588,6 +607,10 @@ KPROBE_ENTRY(page_fault)
 	CFI_ADJUST_CFA_OFFSET 4
 	ALIGN
 error_code:
+	/* the function address is in %gs's slot on the stack */
+	pushl %es
+	CFI_ADJUST_CFA_OFFSET 4
+	/*CFI_REL_OFFSET es, 0*/
 	pushl %ds
 	CFI_ADJUST_CFA_OFFSET 4
 	/*CFI_REL_OFFSET ds, 0*/
@@ -613,18 +636,20 @@ error_code:
 	CFI_ADJUST_CFA_OFFSET 4
 	CFI_REL_OFFSET ebx, 0
 	cld
-	pushl %es
+	pushl %gs
 	CFI_ADJUST_CFA_OFFSET 4
-	/*CFI_REL_OFFSET es, 0*/
+	/*CFI_REL_OFFSET gs, 0*/
+	movl $(__KERNEL_PDA), %ecx
+	movl %ecx, %gs
 	UNWIND_ESPFIX_STACK
 	popl %ecx
 	CFI_ADJUST_CFA_OFFSET -4
 	/*CFI_REGISTER es, ecx*/
-	movl PT_ES(%esp), %edi		# get the function address
+	movl PT_GS(%esp), %edi		# get the function address
 	movl PT_ORIG_EAX(%esp), %edx	# get the error code
-	movl $-1, PT_ORIG_EAX(%esp)
-	movl %ecx, PT_ES(%esp)
-	/*CFI_REL_OFFSET es, ES*/
+	movl $-1, PT_ORIG_EAX(%esp)	# no syscall to restart
+	mov  %ecx, PT_GS(%esp)
+	/*CFI_REL_OFFSET gs, ES*/
 	movl $(__USER_DS), %ecx
 	movl %ecx, %ds
 	movl %ecx, %es
@@ -936,6 +961,7 @@ ENTRY(arch_unwind_init_running)
 	movl	%ebx, PT_EAX(%edx)
 	movl	$__USER_DS, PT_DS(%edx)
 	movl	$__USER_DS, PT_ES(%edx)
+	movl	$0, PT_GS(%edx)
 	movl	%ebx, PT_ORIG_EAX(%edx)
 	movl	%ecx, PT_EIP(%edx)
 	movl	12(%esp), %ecx
diff --git a/arch/i386/kernel/head.S b/arch/i386/kernel/head.S
index 4a83384c5a61..5b14e95ac8b9 100644
--- a/arch/i386/kernel/head.S
+++ b/arch/i386/kernel/head.S
@@ -302,6 +302,7 @@ is386: movl $2,%ecx # set MP
 	movl %eax,%cr0
 
 	call check_x87
+	call setup_pda
 	lgdt cpu_gdt_descr
 	lidt idt_descr
 	ljmp $(__KERNEL_CS),$1f
@@ -312,10 +313,13 @@ is386: movl $2,%ecx # set MP
 	movl %eax,%ds
 	movl %eax,%es
 
-	xorl %eax,%eax		# Clear FS/GS and LDT
+	xorl %eax,%eax		# Clear FS and LDT
 	movl %eax,%fs
-	movl %eax,%gs
 	lldt %ax
+
+	movl $(__KERNEL_PDA),%eax
+	mov  %eax,%gs
+
 	cld			# gcc2 wants the direction flag cleared at all times
 	pushl $0		# fake return address for unwinder
 #ifdef CONFIG_SMP
@@ -346,6 +350,23 @@ check_x87:
 	ret
 
 /*
+ * Point the GDT at this CPU's PDA.  On boot this will be
+ * cpu_gdt_table and boot_pda; for secondary CPUs, these will be
+ * that CPU's GDT and PDA.
+ */
+setup_pda:
+	/* get the PDA pointer */
+	movl start_pda, %eax
+
+	/* slot the PDA address into the GDT */
+	mov cpu_gdt_descr+2, %ecx
+	mov %ax, (__KERNEL_PDA+0+2)(%ecx)	/* base & 0x0000ffff */
+	shr $16, %eax
+	mov %al, (__KERNEL_PDA+4+0)(%ecx)	/* base & 0x00ff0000 */
+	mov %ah, (__KERNEL_PDA+4+3)(%ecx)	/* base & 0xff000000 */
+	ret
+
+/*
  * setup_idt
  *
  * sets up a idt with 256 entries pointing to
@@ -484,6 +505,8 @@ ENTRY(empty_zero_page)
  * This starts the data section.
  */
 .data
+ENTRY(start_pda)
+	.long boot_pda
 
 ENTRY(stack_start)
 	.long init_thread_union+THREAD_SIZE
@@ -525,7 +548,7 @@ idt_descr:
 
 # boot GDT descriptor (later on used by CPU#0):
 	.word 0				# 32 bit align gdt_desc.address
-cpu_gdt_descr:
+ENTRY(cpu_gdt_descr)
 	.word GDT_ENTRIES*8-1
 	.long cpu_gdt_table
 
@@ -585,7 +608,7 @@ ENTRY(cpu_gdt_table)
 	.quad 0x004092000000ffff	/* 0xc8 APM DS    data */
 
 	.quad 0x00c0920000000000	/* 0xd0 - ESPFIX SS */
-	.quad 0x0000000000000000	/* 0xd8 - PDA */
+	.quad 0x00cf92000000ffff	/* 0xd8 - PDA */
 	.quad 0x0000000000000000	/* 0xe0 - unused */
 	.quad 0x0000000000000000	/* 0xe8 - unused */
 	.quad 0x0000000000000000	/* 0xf0 - unused */
diff --git a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c
index ae924c416b68..905364d42847 100644
--- a/arch/i386/kernel/process.c
+++ b/arch/i386/kernel/process.c
@@ -56,6 +56,7 @@
 
 #include <asm/tlbflush.h>
 #include <asm/cpu.h>
+#include <asm/pda.h>
 
 asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
 
@@ -346,6 +347,7 @@ int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
 
 	regs.xds = __USER_DS;
 	regs.xes = __USER_DS;
+	regs.xgs = __KERNEL_PDA;
 	regs.orig_eax = -1;
 	regs.eip = (unsigned long) kernel_thread_helper;
 	regs.xcs = __KERNEL_CS | get_kernel_rpl();
@@ -431,7 +433,6 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long esp,
 	p->thread.eip = (unsigned long) ret_from_fork;
 
 	savesegment(fs,p->thread.fs);
-	savesegment(gs,p->thread.gs);
 
 	tsk = current;
 	if (unlikely(test_tsk_thread_flag(tsk, TIF_IO_BITMAP))) {
@@ -659,16 +660,16 @@ struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct tas
 	load_esp0(tss, next);
 
 	/*
-	 * Save away %fs and %gs. No need to save %es and %ds, as
-	 * those are always kernel segments while inside the kernel.
-	 * Doing this before setting the new TLS descriptors avoids
-	 * the situation where we temporarily have non-reloadable
-	 * segments in %fs and %gs. This could be an issue if the
-	 * NMI handler ever used %fs or %gs (it does not today), or
-	 * if the kernel is running inside of a hypervisor layer.
+	 * Save away %fs. No need to save %gs, as it was saved on the
+	 * stack on entry.  No need to save %es and %ds, as those are
+	 * always kernel segments while inside the kernel.  Doing this
+	 * before setting the new TLS descriptors avoids the situation
+	 * where we temporarily have non-reloadable segments in %fs
+	 * and %gs.  This could be an issue if the NMI handler ever
+	 * used %fs or %gs (it does not today), or if the kernel is
+	 * running inside of a hypervisor layer.
 	 */
 	savesegment(fs, prev->fs);
-	savesegment(gs, prev->gs);
 
 	/*
 	 * Load the per-thread Thread-Local Storage descriptor.
@@ -676,16 +677,13 @@ struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct tas
 	load_TLS(next, cpu);
 
 	/*
-	 * Restore %fs and %gs if needed.
+	 * Restore %fs if needed.
 	 *
-	 * Glibc normally makes %fs be zero, and %gs is one of
-	 * the TLS segments.
+	 * Glibc normally makes %fs be zero.
 	 */
 	if (unlikely(prev->fs | next->fs))
 		loadsegment(fs, next->fs);
 
-	if (prev->gs | next->gs)
-		loadsegment(gs, next->gs);
 
 	/*
 	 * Restore IOPL if needed.
diff --git a/arch/i386/kernel/signal.c b/arch/i386/kernel/signal.c
index 43002cfb40c4..65d7620eaa09 100644
--- a/arch/i386/kernel/signal.c
+++ b/arch/i386/kernel/signal.c
@@ -128,7 +128,7 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, int *peax
 		 X86_EFLAGS_TF | X86_EFLAGS_SF | X86_EFLAGS_ZF | \
 		 X86_EFLAGS_AF | X86_EFLAGS_PF | X86_EFLAGS_CF)
 
-	GET_SEG(gs);
+	COPY_SEG(gs);
 	GET_SEG(fs);
 	COPY_SEG(es);
 	COPY_SEG(ds);
@@ -244,9 +244,7 @@ setup_sigcontext(struct sigcontext __user *sc, struct _fpstate __user *fpstate,
 {
 	int tmp, err = 0;
 
-	tmp = 0;
-	savesegment(gs, tmp);
-	err |= __put_user(tmp, (unsigned int __user *)&sc->gs);
+	err |= __put_user(regs->xgs, (unsigned int __user *)&sc->gs);
 	savesegment(fs, tmp);
 	err |= __put_user(tmp, (unsigned int __user *)&sc->fs);
 
diff --git a/include/asm-i386/mmu_context.h b/include/asm-i386/mmu_context.h
index 1b1495372c4d..68ff102d6f5e 100644
--- a/include/asm-i386/mmu_context.h
+++ b/include/asm-i386/mmu_context.h
@@ -62,8 +62,8 @@ static inline void switch_mm(struct mm_struct *prev,
 #endif
 }
 
 #define deactivate_mm(tsk, mm)			\
-	asm("movl %0,%%fs ; movl %0,%%gs": :"r" (0))
+	asm("movl %0,%%fs": :"r" (0));
 
 #define activate_mm(prev, next) \
 	switch_mm((prev),(next),NULL)
diff --git a/include/asm-i386/processor.h b/include/asm-i386/processor.h
index a9f2041c7c87..f73cf836e649 100644
--- a/include/asm-i386/processor.h
+++ b/include/asm-i386/processor.h
@@ -473,6 +473,7 @@ struct thread_struct {
 	.vm86_info = NULL,						\
 	.sysenter_cs = __KERNEL_CS,					\
 	.io_bitmap_ptr = NULL,						\
+	.gs = __KERNEL_PDA,						\
 }
 
 /*
@@ -500,7 +501,8 @@ static inline void load_esp0(struct tss_struct *tss, struct thread_struct *threa
 }
 
 #define start_thread(regs, new_eip, new_esp) do {		\
-	__asm__("movl %0,%%fs ; movl %0,%%gs": :"r" (0));	\
+	__asm__("movl %0,%%fs": :"r" (0));			\
+	regs->xgs = 0;						\
 	set_fs(USER_DS);					\
 	regs->xds = __USER_DS;					\
 	regs->xes = __USER_DS;					\
diff --git a/include/asm-i386/ptrace.h b/include/asm-i386/ptrace.h
index d505f501077a..bdbc894339b4 100644
--- a/include/asm-i386/ptrace.h
+++ b/include/asm-i386/ptrace.h
@@ -16,6 +16,8 @@ struct pt_regs {
 	long eax;
 	int  xds;
 	int  xes;
+	/* int  xfs; */
+	int  xgs;
 	long orig_eax;
 	long eip;
 	int  xcs;
diff --git a/kernel/fork.c b/kernel/fork.c
index 8cdd3e72ba55..fd22245e3881 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1303,7 +1303,7 @@ fork_out:
 	return ERR_PTR(retval);
 }
 
-struct pt_regs * __devinit __attribute__((weak)) idle_regs(struct pt_regs *regs)
+noinline struct pt_regs * __devinit __attribute__((weak)) idle_regs(struct pt_regs *regs)
 {
 	memset(regs, 0, sizeof(struct pt_regs));
 	return regs;
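
[Editor's note on the fixup blocks added to entry.S above: "popl %gs"
and "mov PT_GS(%esp), %gs" can fault on a stale selector, so each
faultable instruction gets an __ex_table entry routing the fault to a
stub that substitutes 0 and resumes.  The kernel uses the same pattern
from C via inline asm; a simplified sketch modeled on the i386
loadsegment() macro, with a hypothetical function name:]

	/* Sketch: load %gs, falling back to 0 if the selector faults. */
	static inline void load_gs_or_zero(unsigned int sel)
	{
		asm volatile("1:	mov %0, %%gs\n"
			     "2:\n"
			     ".section .fixup,\"ax\"\n"
			     "3:	pushl $0\n"		/* substitute a null selector */
			     "	popl %%gs\n"
			     "	jmp 2b\n"			/* resume after the load */
			     ".previous\n"
			     ".section __ex_table,\"a\"\n"
			     "	.align 4\n"
			     "	.long 1b,3b\n"		/* fault at 1: goes to fixup 3: */
			     ".previous"
			     : : "rm" (sel));
	}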