Diffstat (limited to 'arch')
-rw-r--r--	arch/i386/power/cpu.c	17
-rw-r--r--	arch/x86_64/kernel/suspend.c	18
2 files changed, 16 insertions(+), 19 deletions(-)
diff --git a/arch/i386/power/cpu.c b/arch/i386/power/cpu.c
index d099d01461f4..0e6b45b61251 100644
--- a/arch/i386/power/cpu.c
+++ b/arch/i386/power/cpu.c
@@ -44,7 +44,6 @@ void __save_processor_state(struct saved_context *ctxt)
 	 */
 	asm volatile ("sgdt %0" : "=m" (ctxt->gdt_limit));
 	asm volatile ("sidt %0" : "=m" (ctxt->idt_limit));
-	asm volatile ("sldt %0" : "=m" (ctxt->ldt));
 	asm volatile ("str %0" : "=m" (ctxt->tr));
 
 	/*
@@ -107,7 +106,6 @@ static void fix_processor_context(void)
 
 void __restore_processor_state(struct saved_context *ctxt)
 {
-
 	/*
 	 * control registers
 	 */
@@ -117,6 +115,13 @@ void __restore_processor_state(struct saved_context *ctxt)
 	asm volatile ("movl %0, %%cr0" :: "r" (ctxt->cr0));
 
 	/*
+	 * now restore the descriptor tables to their proper values
+	 * ltr is done in fix_processor_context().
+	 */
+	asm volatile ("lgdt %0" :: "m" (ctxt->gdt_limit));
+	asm volatile ("lidt %0" :: "m" (ctxt->idt_limit));
+
+	/*
 	 * segment registers
 	 */
 	asm volatile ("movw %0, %%es" :: "r" (ctxt->es));
@@ -125,14 +130,6 @@ void __restore_processor_state(struct saved_context *ctxt)
 	asm volatile ("movw %0, %%ss" :: "r" (ctxt->ss));
 
 	/*
-	 * now restore the descriptor tables to their proper values
-	 * ltr is done i fix_processor_context().
-	 */
-	asm volatile ("lgdt %0" :: "m" (ctxt->gdt_limit));
-	asm volatile ("lidt %0" :: "m" (ctxt->idt_limit));
-	asm volatile ("lldt %0" :: "m" (ctxt->ldt));
-
-	/*
 	 * sysenter MSRs
 	 */
 	if (boot_cpu_has(X86_FEATURE_SEP))
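
A condensed sketch of the i386 restore path as these hunks leave it, for readability only: the LDT is no longer saved/restored with sldt/lldt here, and lgdt/lidt now run before the segment-register reloads, so the selectors written afterwards refer to the freshly restored GDT. Only lines visible in the hunks above are kept; everything else in __restore_processor_state() (the other control registers, the sysenter MSRs, the later call into fix_processor_context()) is elided, and the note about ltr comes from the comment in the patch itself.

void __restore_processor_state(struct saved_context *ctxt)
{
	/* control registers (only cr0 appears as context in the hunks above) */
	asm volatile ("movl %0, %%cr0" :: "r" (ctxt->cr0));

	/* descriptor tables now come before the segment registers */
	asm volatile ("lgdt %0" :: "m" (ctxt->gdt_limit));
	asm volatile ("lidt %0" :: "m" (ctxt->idt_limit));

	/* segment registers are reloaded against the restored GDT */
	asm volatile ("movw %0, %%es" :: "r" (ctxt->es));
	asm volatile ("movw %0, %%ss" :: "r" (ctxt->ss));

	/* sysenter MSRs etc. follow; ltr is done in fix_processor_context() */
}
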
diff --git a/arch/x86_64/kernel/suspend.c b/arch/x86_64/kernel/suspend.c
index ebaa1e37d657..6c0f402e3a88 100644
--- a/arch/x86_64/kernel/suspend.c
+++ b/arch/x86_64/kernel/suspend.c
@@ -44,7 +44,6 @@ void __save_processor_state(struct saved_context *ctxt)
 	 */
 	asm volatile ("sgdt %0" : "=m" (ctxt->gdt_limit));
 	asm volatile ("sidt %0" : "=m" (ctxt->idt_limit));
-	asm volatile ("sldt %0" : "=m" (ctxt->ldt));
 	asm volatile ("str %0" : "=m" (ctxt->tr));
 
 	/* XMM0..XMM15 should be handled by kernel_fpu_begin(). */
@@ -69,6 +68,7 @@ void __save_processor_state(struct saved_context *ctxt)
 	asm volatile ("movq %%cr2, %0" : "=r" (ctxt->cr2));
 	asm volatile ("movq %%cr3, %0" : "=r" (ctxt->cr3));
 	asm volatile ("movq %%cr4, %0" : "=r" (ctxt->cr4));
+	asm volatile ("movq %%cr8, %0" : "=r" (ctxt->cr8));
 }
 
 void save_processor_state(void)
@@ -90,12 +90,20 @@ void __restore_processor_state(struct saved_context *ctxt)
 	/*
 	 * control registers
 	 */
+	asm volatile ("movq %0, %%cr8" :: "r" (ctxt->cr8));
 	asm volatile ("movq %0, %%cr4" :: "r" (ctxt->cr4));
 	asm volatile ("movq %0, %%cr3" :: "r" (ctxt->cr3));
 	asm volatile ("movq %0, %%cr2" :: "r" (ctxt->cr2));
 	asm volatile ("movq %0, %%cr0" :: "r" (ctxt->cr0));
 
 	/*
+	 * now restore the descriptor tables to their proper values
+	 * ltr is done in fix_processor_context().
+	 */
+	asm volatile ("lgdt %0" :: "m" (ctxt->gdt_limit));
+	asm volatile ("lidt %0" :: "m" (ctxt->idt_limit));
+
+	/*
 	 * segment registers
 	 */
 	asm volatile ("movw %0, %%ds" :: "r" (ctxt->ds));
@@ -108,14 +116,6 @@ void __restore_processor_state(struct saved_context *ctxt)
 	wrmsrl(MSR_GS_BASE, ctxt->gs_base);
 	wrmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base);
 
-	/*
-	 * now restore the descriptor tables to their proper values
-	 * ltr is done i fix_processor_context().
-	 */
-	asm volatile ("lgdt %0" :: "m" (ctxt->gdt_limit));
-	asm volatile ("lidt %0" :: "m" (ctxt->idt_limit));
-	asm volatile ("lldt %0" :: "m" (ctxt->ldt));
-
 	fix_processor_context();
 
 	do_fpu_end();
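
The corresponding condensed sketch for x86_64, again keeping only lines that appear in the hunks above (this is a reading aid, not the complete functions): cr8, the task-priority register, is now saved together with the other control registers and restored first; the descriptor tables are reloaded before the segment registers and the GS base MSRs; and ltr is still left to fix_processor_context().

void __save_processor_state(struct saved_context *ctxt)
{
	/* ... descriptor tables, tr and the FS/GS MSRs are saved earlier ... */
	asm volatile ("movq %%cr2, %0" : "=r" (ctxt->cr2));
	asm volatile ("movq %%cr3, %0" : "=r" (ctxt->cr3));
	asm volatile ("movq %%cr4, %0" : "=r" (ctxt->cr4));
	asm volatile ("movq %%cr8, %0" : "=r" (ctxt->cr8));	/* new in this patch */
}

void __restore_processor_state(struct saved_context *ctxt)
{
	/* control registers: cr8 first (new in this patch), cr0 last */
	asm volatile ("movq %0, %%cr8" :: "r" (ctxt->cr8));
	asm volatile ("movq %0, %%cr4" :: "r" (ctxt->cr4));
	asm volatile ("movq %0, %%cr3" :: "r" (ctxt->cr3));
	asm volatile ("movq %0, %%cr2" :: "r" (ctxt->cr2));
	asm volatile ("movq %0, %%cr0" :: "r" (ctxt->cr0));

	/* descriptor tables before segment registers; ltr is done in fix_processor_context() */
	asm volatile ("lgdt %0" :: "m" (ctxt->gdt_limit));
	asm volatile ("lidt %0" :: "m" (ctxt->idt_limit));

	/* segment registers and the GS base MSRs */
	asm volatile ("movw %0, %%ds" :: "r" (ctxt->ds));
	wrmsrl(MSR_GS_BASE, ctxt->gs_base);
	wrmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base);

	fix_processor_context();
	do_fpu_end();
}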