-rw-r--r--   arch/i386/kernel/process.c     4
-rw-r--r--   arch/i386/kernel/vm86.c        4
-rw-r--r--   arch/x86_64/kernel/process.c   12
-rw-r--r--   include/asm-i386/system.h      6
4 files changed, 13 insertions, 13 deletions
diff --git a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c
index b2203e21acb3..85bd56d44314 100644
--- a/arch/i386/kernel/process.c
+++ b/arch/i386/kernel/process.c
@@ -611,8 +611,8 @@ struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct tas
 	 * Save away %fs and %gs. No need to save %es and %ds, as
 	 * those are always kernel segments while inside the kernel.
 	 */
-	asm volatile("movl %%fs,%0":"=m" (*(int *)&prev->fs));
-	asm volatile("movl %%gs,%0":"=m" (*(int *)&prev->gs));
+	asm volatile("mov %%fs,%0":"=m" (prev->fs));
+	asm volatile("mov %%gs,%0":"=m" (prev->gs));
 
 	/*
 	 * Restore %fs and %gs if needed.
diff --git a/arch/i386/kernel/vm86.c b/arch/i386/kernel/vm86.c
index 2f3d52dacff7..d16cd3738a48 100644
--- a/arch/i386/kernel/vm86.c
+++ b/arch/i386/kernel/vm86.c
@@ -294,8 +294,8 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
 	 */
 	info->regs32->eax = 0;
 	tsk->thread.saved_esp0 = tsk->thread.esp0;
-	asm volatile("movl %%fs,%0":"=m" (tsk->thread.saved_fs));
-	asm volatile("movl %%gs,%0":"=m" (tsk->thread.saved_gs));
+	asm volatile("mov %%fs,%0":"=m" (tsk->thread.saved_fs));
+	asm volatile("mov %%gs,%0":"=m" (tsk->thread.saved_gs));
 
 	tss = &per_cpu(init_tss, get_cpu());
 	tsk->thread.esp0 = (unsigned long) &info->VM86_TSS_ESP0;
diff --git a/arch/x86_64/kernel/process.c b/arch/x86_64/kernel/process.c
index 9922d2ba24a3..761b6d35e338 100644
--- a/arch/x86_64/kernel/process.c
+++ b/arch/x86_64/kernel/process.c
@@ -402,10 +402,10 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long rsp,
 	p->thread.fs = me->thread.fs;
 	p->thread.gs = me->thread.gs;
 
-	asm("movl %%gs,%0" : "=m" (p->thread.gsindex));
-	asm("movl %%fs,%0" : "=m" (p->thread.fsindex));
-	asm("movl %%es,%0" : "=m" (p->thread.es));
-	asm("movl %%ds,%0" : "=m" (p->thread.ds));
+	asm("mov %%gs,%0" : "=m" (p->thread.gsindex));
+	asm("mov %%fs,%0" : "=m" (p->thread.fsindex));
+	asm("mov %%es,%0" : "=m" (p->thread.es));
+	asm("mov %%ds,%0" : "=m" (p->thread.ds));
 
 	if (unlikely(me->thread.io_bitmap_ptr != NULL)) {
 		p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
@@ -468,11 +468,11 @@ struct task_struct *__switch_to(struct task_struct *prev_p, struct task_struct *
 	 * Switch DS and ES.
 	 * This won't pick up thread selector changes, but I guess that is ok.
 	 */
-	asm volatile("movl %%es,%0" : "=m" (prev->es));
+	asm volatile("mov %%es,%0" : "=m" (prev->es));
 	if (unlikely(next->es | prev->es))
 		loadsegment(es, next->es);
 
-	asm volatile ("movl %%ds,%0" : "=m" (prev->ds));
+	asm volatile ("mov %%ds,%0" : "=m" (prev->ds));
 	if (unlikely(next->ds | prev->ds))
 		loadsegment(ds, next->ds);
 
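[Aside, not part of the commit: a minimal standalone sketch of the save-then-conditionally-reload pattern the new __switch_to() code uses. The struct and the simplified loadsegment_sketch() macro below are illustrative only and omit the kernel's fault-fixup machinery.]

/* Illustrative sketch with made-up types: the outgoing task's selector is
 * stored with a plain 16-bit "mov" directly into the field (no (int *) cast),
 * and the segment register is reloaded only when the previous or the next
 * selector is non-zero, so the common all-zero case skips the reload.
 */
struct seg_state {
	unsigned short es, ds;		/* 16-bit segment selectors */
};

#define loadsegment_sketch(seg, value) \
	asm volatile("mov %0,%%" #seg : : "rm" (value))

static void switch_data_segments(struct seg_state *prev, struct seg_state *next)
{
	asm volatile("mov %%es,%0" : "=m" (prev->es));
	if (next->es | prev->es)
		loadsegment_sketch(es, next->es);

	asm volatile("mov %%ds,%0" : "=m" (prev->ds));
	if (next->ds | prev->ds)
		loadsegment_sketch(ds, next->ds);
}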
diff --git a/include/asm-i386/system.h b/include/asm-i386/system.h
index 6f74d4c44a0e..3db717a244f0 100644
--- a/include/asm-i386/system.h
+++ b/include/asm-i386/system.h
@@ -81,7 +81,7 @@ static inline unsigned long _get_base(char * addr)
 #define loadsegment(seg,value)			\
 	asm volatile("\n"			\
 		"1:\t"				\
-		"movl %0,%%" #seg "\n"		\
+		"mov %0,%%" #seg "\n"		\
 		"2:\n"				\
 		".section .fixup,\"ax\"\n"	\
 		"3:\t"				\
@@ -93,13 +93,13 @@ static inline unsigned long _get_base(char * addr)
93 ".align 4\n\t" \ 93 ".align 4\n\t" \
94 ".long 1b,3b\n" \ 94 ".long 1b,3b\n" \
95 ".previous" \ 95 ".previous" \
96 : :"m" (*(unsigned int *)&(value))) 96 : :"m" (value))
97 97
98/* 98/*
99 * Save a segment register away 99 * Save a segment register away
100 */ 100 */
101#define savesegment(seg, value) \ 101#define savesegment(seg, value) \
102 asm volatile("movl %%" #seg ",%0":"=m" (*(int *)&(value))) 102 asm volatile("mov %%" #seg ",%0":"=m" (value))
103 103
104/* 104/*
105 * Clear and set 'TS' bit respectively 105 * Clear and set 'TS' bit respectively
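[For reference, also not part of the commit: the updated savesegment() shape can be exercised from user space, where reading segment registers is permitted. This is a hypothetical test program, not kernel code.]

#include <stdio.h>

/* Hypothetical user-space re-creation of the new savesegment() form: a plain
 * "mov" from a segment register into a 16-bit memory operand, with the
 * variable itself as the "=m" constraint instead of a cast through (int *).
 */
#define savesegment(seg, value) \
	asm volatile("mov %%" #seg ",%0" : "=m" (value))

int main(void)
{
	unsigned short cs_sel, ss_sel;

	savesegment(cs, cs_sel);
	savesegment(ss, ss_sel);
	printf("cs=%#hx ss=%#hx\n", cs_sel, ss_sel);
	return 0;
}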