author     Jeremy Fitzhardinge <jeremy@goop.org>     2008-06-25 00:19:24 -0400
committer  Ingo Molnar <mingo@elte.hu>               2008-07-08 07:11:11 -0400
commit     478de5a9d691dd0c048ddce62dbec23722515636 (patch)
tree       82d165c8420571925a0d56c92316f10a436f1831  /arch/x86/kernel/process_64.c
parent     3fe0a63efd4437f6438ce5f2708929b1108873b6 (diff)
x86: save %fs and %gs before load_TLS() and arch_leave_lazy_cpu_mode()
We must do this because load_TLS() may need to clear %fs and %gs
(e.g. under Xen).

Signed-off-by: Eduardo Habkost <ehabkost@redhat.com>
Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Cc: xen-devel <xen-devel@lists.xensource.com>
Cc: Stephen Tweedie <sct@redhat.com>
Cc: Eduardo Habkost <ehabkost@redhat.com>
Cc: Mark McLoughlin <markmc@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
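The ordering constraint is easiest to see in isolation. Below is a minimal, standalone sketch, not the kernel code: live_fs/live_gs and the stub macros are hypothetical stand-ins (the real savesegment(seg, value) and load_TLS(next, cpu) have different definitions), and they model only the one property the patch relies on, namely that load_TLS() may zero %fs and %gs (as xen_load_tls() can under Xen), so the selectors must be read into fsindex/gsindex first.

    /* sketch_fs_gs_order.c -- standalone toy, NOT the kernel implementation */
    #include <stdio.h>

    /* Hypothetical stand-ins for the live %fs/%gs selector registers. */
    static unsigned live_fs = 0x63, live_gs = 0x6b;

    /* Stub macros: they model only the behaviour that matters here --
     * load_TLS() may clear %fs and %gs (as xen_load_tls() can under Xen). */
    #define savesegment(seg, value)  ((value) = live_##seg)
    #define load_TLS()               (live_fs = 0, live_gs = 0)

    int main(void)
    {
            unsigned fsindex, gsindex;

            /* Save the outgoing task's selectors BEFORE load_TLS() ... */
            savesegment(fs, fsindex);
            savesegment(gs, gsindex);

            /* ... because load_TLS() may zero them. */
            load_TLS();

            /* fsindex/gsindex still hold the pre-switch values, so the
             * later "does this segment need a reload?" checks compare
             * against the outgoing task's selectors, not against zero. */
            printf("saved fs=%#x gs=%#x, live fs=%#x gs=%#x\n",
                   fsindex, gsindex, live_fs, live_gs);
            return 0;
    }

Had savesegment() run after load_TLS(), fsindex and gsindex would read back 0 and the reload checks in __switch_to() would compare the wrong values; moving the saves earlier is exactly what the hunks below do.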
Diffstat (limited to 'arch/x86/kernel/process_64.c')
-rw-r--r--  arch/x86/kernel/process_64.c  17
1 file changed, 11 insertions(+), 6 deletions(-)
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 488eaca47bd8..db5eb963e4df 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -538,6 +538,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 				 *next = &next_p->thread;
 	int cpu = smp_processor_id();
 	struct tss_struct *tss = &per_cpu(init_tss, cpu);
+	unsigned fsindex, gsindex;
 
 	/* we're going to use this soon, after a few expensive things */
 	if (next_p->fpu_counter>5)
@@ -560,6 +561,15 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	if (unlikely(next->ds | prev->ds))
 		loadsegment(ds, next->ds);
 
+
+	/* We must save %fs and %gs before load_TLS() because
+	 * %fs and %gs may be cleared by load_TLS().
+	 *
+	 * (e.g. xen_load_tls())
+	 */
+	savesegment(fs, fsindex);
+	savesegment(gs, gsindex);
+
 	load_TLS(next, cpu);
 
 	/*
@@ -575,8 +585,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	 * Switch FS and GS.
 	 */
 	{
-		unsigned fsindex;
-		savesegment(fs, fsindex);
 		/* segment register != 0 always requires a reload.
 		   also reload when it has changed.
 		   when prev process used 64bit base always reload
@@ -594,10 +602,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 		if (next->fs)
 			wrmsrl(MSR_FS_BASE, next->fs);
 		prev->fsindex = fsindex;
-	}
-	{
-		unsigned gsindex;
-		savesegment(gs, gsindex);
+
 		if (unlikely(gsindex | next->gsindex | prev->gs)) {
 			load_gs_index(next->gsindex);
 			if (gsindex)