path: root/arch/x86/kernel/process_64.c
author	Jeremy Fitzhardinge <jeremy@goop.org>	2008-06-25 00:19:00 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-07-08 07:10:25 -0400
commit	ada857082317e6883cfcf7deb4e0c54d3c447cb0
tree	0c87da9f4d6a1a96aa770ba6c54a80cfa088f9d2	/arch/x86/kernel/process_64.c
parent	af2b1c609ff52b6469d8e67696db98c93c348b0e
x86: remove open-coded save/load segment operations
This removes a pile of buggy open-coded implementations of savesegment
and loadsegment. (They are buggy because they don't have memory barriers
to prevent them from being reordered with respect to memory accesses.)

Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Cc: xen-devel <xen-devel@lists.xensource.com>
Cc: Stephen Tweedie <sct@redhat.com>
Cc: Eduardo Habkost <ehabkost@redhat.com>
Cc: Mark McLoughlin <markmc@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
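[Editor's note, not part of the commit: a minimal user-space sketch of the reordering hazard the message describes. An asm statement that lacks a "memory" clobber only depends on its stated operands, so the compiler is free to move it across unrelated loads and stores. The names below are made up for illustration.]

unsigned short sel;
int flag;

void racy_snapshot(void)
{
	flag = 1;	/* meant to happen before the segment read */
	/* no "memory" clobber: GCC may hoist this above flag = 1 */
	asm("mov %%gs,%0" : "=r" (sel));
}

void ordered_snapshot(void)
{
	flag = 1;
	/* the "memory" clobber orders the asm after all prior accesses */
	asm("mov %%gs,%0" : "=r" (sel) : : "memory");
}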
Diffstat (limited to 'arch/x86/kernel/process_64.c')
-rw-r--r--	arch/x86/kernel/process_64.c	| 28
1 file changed, 15 insertions(+), 13 deletions(-)
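[Editor's note: for reference, a simplified sketch of the helpers the diff switches to, paraphrased from the x86 segment headers of that era; the real loadsegment() also carries exception-fixup so that a faulting selector load falls back to the null selector, which is omitted here.]

#define savesegment(seg, value)				\
	asm("mov %%" #seg ",%0" : "=r" (value) : : "memory")

#define loadsegment(seg, value)				\
	asm volatile("movl %k0,%%" #seg			\
		     : : "r" (value) : "memory")

Both expansions name "memory" in the clobber list, which is exactly the barrier the open-coded versions replaced below were missing.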
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 290183e9731a..ddc6fcc73dc6 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -335,10 +335,10 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
 	p->thread.fs = me->thread.fs;
 	p->thread.gs = me->thread.gs;
 
-	asm("mov %%gs,%0" : "=m" (p->thread.gsindex));
-	asm("mov %%fs,%0" : "=m" (p->thread.fsindex));
-	asm("mov %%es,%0" : "=m" (p->thread.es));
-	asm("mov %%ds,%0" : "=m" (p->thread.ds));
+	savesegment(gs, p->thread.gsindex);
+	savesegment(fs, p->thread.fsindex);
+	savesegment(es, p->thread.es);
+	savesegment(ds, p->thread.ds);
 
 	if (unlikely(test_tsk_thread_flag(me, TIF_IO_BITMAP))) {
 		p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
@@ -377,7 +377,9 @@ out:
 void
 start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
 {
-	asm volatile("movl %0, %%fs; movl %0, %%es; movl %0, %%ds" :: "r"(0));
+	loadsegment(fs, 0);
+	loadsegment(es, 0);
+	loadsegment(ds, 0);
 	load_gs_index(0);
 	regs->ip = new_ip;
 	regs->sp = new_sp;
@@ -550,11 +552,11 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	 * Switch DS and ES.
 	 * This won't pick up thread selector changes, but I guess that is ok.
 	 */
-	asm volatile("mov %%es,%0" : "=m" (prev->es));
+	savesegment(es, prev->es);
 	if (unlikely(next->es | prev->es))
 		loadsegment(es, next->es);
 
-	asm volatile ("mov %%ds,%0" : "=m" (prev->ds));
+	savesegment(ds, prev->ds);
 	if (unlikely(next->ds | prev->ds))
 		loadsegment(ds, next->ds);
 
@@ -565,7 +567,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	 */
 	{
 		unsigned fsindex;
-		asm volatile("movl %%fs,%0" : "=r" (fsindex));
+		savesegment(fs, fsindex);
 		/* segment register != 0 always requires a reload.
 		   also reload when it has changed.
 		   when prev process used 64bit base always reload
@@ -586,7 +588,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	}
 	{
 		unsigned gsindex;
-		asm volatile("movl %%gs,%0" : "=r" (gsindex));
+		savesegment(gs, gsindex);
 		if (unlikely(gsindex | next->gsindex | prev->gs)) {
 			load_gs_index(next->gsindex);
 			if (gsindex)
@@ -767,7 +769,7 @@ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
 			set_32bit_tls(task, FS_TLS, addr);
 			if (doit) {
 				load_TLS(&task->thread, cpu);
-				asm volatile("movl %0,%%fs" :: "r"(FS_TLS_SEL));
+				loadsegment(fs, FS_TLS_SEL);
 			}
 			task->thread.fsindex = FS_TLS_SEL;
 			task->thread.fs = 0;
@@ -777,7 +779,7 @@ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
 			if (doit) {
 				/* set the selector to 0 to not confuse
 				   __switch_to */
-				asm volatile("movl %0,%%fs" :: "r" (0));
+				loadsegment(fs, 0);
 				ret = checking_wrmsrl(MSR_FS_BASE, addr);
 			}
 		}
@@ -800,7 +802,7 @@ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
 		if (task->thread.gsindex == GS_TLS_SEL)
 			base = read_32bit_tls(task, GS_TLS);
 		else if (doit) {
-			asm("movl %%gs,%0" : "=r" (gsindex));
+			savesegment(gs, gsindex);
 			if (gsindex)
 				rdmsrl(MSR_KERNEL_GS_BASE, base);
 			else