path: root/arch/i386/kernel
author     H. J. Lu <hjl@lucon.org>                     2005-05-01 11:58:48 -0400
committer  Linus Torvalds <torvalds@ppc970.osdl.org>    2005-05-01 11:58:48 -0400
commit     fd51f666fa591294bd7462447512666e61c56ea0 (patch)
tree       0addf0006900152975c38bd75fdfd238c9179013 /arch/i386/kernel
parent     d5b63d78f1e75f6c6f04862dfb2f2a4aeffafd4c (diff)
[PATCH] i386/x86_64 segment register access update
The new i386/x86_64 assemblers no longer accept instructions for moving
between a segment register and a 32bit memory location, i.e.,

	movl (%eax),%ds
	movl %ds,(%eax)

To generate instructions for moving between a segment register and a
16bit memory location without the 16bit operand size prefix, 0x66,

	mov (%eax),%ds
	mov %ds,(%eax)

should be used. It will work with both new and old assemblers. The
assembler starting from 2.16.90.0.1 will also support

	movw (%eax),%ds
	movw %ds,(%eax)

without the 0x66 prefix. I am enclosing patches for 2.4 and 2.6 kernels
here. The resulting kernel binaries are unchanged with both old and new
assemblers, provided gcc never generates memory access for

	unsigned gsindex;
	asm volatile("movl %%gs,%0" : "=g" (gsindex));

If gcc does generate memory access for the code above, the upper bits
of gsindex are undefined and the new assembler doesn't allow it.

Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
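A minimal standalone sketch of the difference (not part of the patch;
the file and symbol names here are made up for illustration, assuming a
32-bit x86 target and a GCC-style toolchain):

	/* segtest.c -- build with: gcc -m32 -c segtest.c */
	unsigned short saved_fs;	/* 16-bit slot matches the register width */

	void save_fs(void)
	{
		/* Accepted by both old and new assemblers: no size suffix,
		 * so neither a 32-bit form nor a 0x66 prefix is requested. */
		asm volatile("mov %%fs,%0" : "=m" (saved_fs));

		/* Rejected by the new assembler, since "movl" asks for a
		 * 32-bit move between a segment register and memory:
		 *
		 *	asm volatile("movl %%fs,%0" : "=m" (saved_fs));
		 */
	}

	unsigned int read_gs(void)
	{
		unsigned int gsindex;

		/* "=r" pins the operand to a register, where "movl" is
		 * still legal; with "=g" gcc may pick memory instead,
		 * hitting the undefined-upper-bits case the message
		 * above warns about. */
		asm volatile("movl %%gs,%0" : "=r" (gsindex));
		return gsindex;
	}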
Diffstat (limited to 'arch/i386/kernel')
-rw-r--r--  arch/i386/kernel/process.c  4
-rw-r--r--  arch/i386/kernel/vm86.c     4
2 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c
index b2203e21acb..85bd56d4431 100644
--- a/arch/i386/kernel/process.c
+++ b/arch/i386/kernel/process.c
@@ -611,8 +611,8 @@ struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct tas
 	 * Save away %fs and %gs. No need to save %es and %ds, as
 	 * those are always kernel segments while inside the kernel.
 	 */
-	asm volatile("movl %%fs,%0":"=m" (*(int *)&prev->fs));
-	asm volatile("movl %%gs,%0":"=m" (*(int *)&prev->gs));
+	asm volatile("mov %%fs,%0":"=m" (prev->fs));
+	asm volatile("mov %%gs,%0":"=m" (prev->gs));
 
 	/*
 	 * Restore %fs and %gs if needed.
diff --git a/arch/i386/kernel/vm86.c b/arch/i386/kernel/vm86.c
index 2f3d52dacff..d16cd3738a4 100644
--- a/arch/i386/kernel/vm86.c
+++ b/arch/i386/kernel/vm86.c
@@ -294,8 +294,8 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
 	 */
 	info->regs32->eax = 0;
 	tsk->thread.saved_esp0 = tsk->thread.esp0;
-	asm volatile("movl %%fs,%0":"=m" (tsk->thread.saved_fs));
-	asm volatile("movl %%gs,%0":"=m" (tsk->thread.saved_gs));
+	asm volatile("mov %%fs,%0":"=m" (tsk->thread.saved_fs));
+	asm volatile("mov %%gs,%0":"=m" (tsk->thread.saved_gs));
 
 	tss = &per_cpu(init_tss, get_cpu());
 	tsk->thread.esp0 = (unsigned long) &info->VM86_TSS_ESP0;