author		Sergio Luis <sergio@larces.uece.br>	2009-04-27 18:27:05 -0400
committer	Rafael J. Wysocki <rjw@sisk.pl>	2009-06-12 15:32:30 -0400
commit		3134d04b7790f7239b221f16c2d97db4d96ac3c0 (patch)
tree		45cb28131345c0556098c8b77c552cb8a9de64e3 /arch/x86/power
parent		f9ebbe53e79c5978d0e8ead0843a3717b41ad3d5 (diff)
x86: unify power/cpu_(32|64) regarding restoring processor state
In this step, we unify the cpu_32.c and cpu_64.c functions that work on restoring the saved processor state. We also eliminate the forward declaration of fix_processor_context() for X86_64, as it is no longer needed.

Signed-off-by: Sergio Luis <sergio@larces.uece.br>
Signed-off-by: Lauro Salmito <laurosalmito@gmail.com>
Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
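For orientation, here is a rough sketch (not part of the patch itself) of the shape __restore_processor_state() takes in both files after this change: the 32-bit and 64-bit restore paths sit side by side in one function, selected by CONFIG_X86_32, using the same helpers and saved_context fields that appear in the diff below; the bodies are abbreviated.

static void __restore_processor_state(struct saved_context *ctxt)
{
	/* control registers: cr4 only exists from the Pentium on */
#ifdef CONFIG_X86_32
	if (ctxt->cr4)
		write_cr4(ctxt->cr4);
#else
	/* CONFIG_X86_64 */
	wrmsrl(MSR_EFER, ctxt->efer);
	write_cr8(ctxt->cr8);
	write_cr4(ctxt->cr4);
#endif
	write_cr3(ctxt->cr3);
	write_cr2(ctxt->cr2);
	write_cr0(ctxt->cr0);

	/* descriptor tables, segment registers and the sysenter/FS/GS MSRs
	 * follow the same #ifdef pattern (elided here; see the diff below) */

	fix_processor_context();	/* now shared; 64-bit-only TSS/syscall bits under #ifdef */
	do_fpu_end();
	mtrr_ap_init();
#ifdef CONFIG_X86_32
	mcheck_init(&boot_cpu_data);
#endif
}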
Diffstat (limited to 'arch/x86/power')
-rw-r--r--	arch/x86/power/cpu_32.c	 55
-rw-r--r--	arch/x86/power/cpu_64.c	118
2 files changed, 135 insertions(+), 38 deletions(-)
diff --git a/arch/x86/power/cpu_32.c b/arch/x86/power/cpu_32.c
index 294e78baff75..29b9c0a1ca62 100644
--- a/arch/x86/power/cpu_32.c
+++ b/arch/x86/power/cpu_32.c
@@ -27,8 +27,6 @@ unsigned long saved_context_esi, saved_context_edi;
 unsigned long saved_context_eflags;
 #else
 /* CONFIG_X86_64 */
-static void fix_processor_context(void);
-
 struct saved_context saved_context;
 #endif
 
@@ -136,6 +134,11 @@ static void fix_processor_context(void)
 	 * similar stupidity.
 	 */
 
+#ifdef CONFIG_X86_64
+	get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
+
+	syscall_init();				/* This sets MSR_*STAR and related */
+#endif
 	load_TR_desc();				/* This does ltr */
 	load_LDT(&current->active_mm->context);	/* This does lldt */
 
@@ -143,6 +146,7 @@ static void fix_processor_context(void)
 	 * Now maybe reload the debug registers
 	 */
 	if (current->thread.debugreg7) {
+#ifdef CONFIG_X86_32
 		set_debugreg(current->thread.debugreg0, 0);
 		set_debugreg(current->thread.debugreg1, 1);
 		set_debugreg(current->thread.debugreg2, 2);
@@ -150,18 +154,40 @@ static void fix_processor_context(void)
 		/* no 4 and 5 */
 		set_debugreg(current->thread.debugreg6, 6);
 		set_debugreg(current->thread.debugreg7, 7);
+#else
+		/* CONFIG_X86_64 */
+		loaddebug(&current->thread, 0);
+		loaddebug(&current->thread, 1);
+		loaddebug(&current->thread, 2);
+		loaddebug(&current->thread, 3);
+		/* no 4 and 5 */
+		loaddebug(&current->thread, 6);
+		loaddebug(&current->thread, 7);
+#endif
 	}
 
 }
 
+/**
+ *	__restore_processor_state - restore the contents of CPU registers saved
+ *		by __save_processor_state()
+ *	@ctxt - structure to load the registers contents from
+ */
 static void __restore_processor_state(struct saved_context *ctxt)
 {
 	/*
 	 * control registers
 	 */
 	/* cr4 was introduced in the Pentium CPU */
+#ifdef CONFIG_X86_32
 	if (ctxt->cr4)
 		write_cr4(ctxt->cr4);
+#else
+/* CONFIG X86_64 */
+	wrmsrl(MSR_EFER, ctxt->efer);
+	write_cr8(ctxt->cr8);
+	write_cr4(ctxt->cr4);
+#endif
 	write_cr3(ctxt->cr3);
 	write_cr2(ctxt->cr2);
 	write_cr0(ctxt->cr0);
@@ -170,12 +196,19 @@ static void __restore_processor_state(struct saved_context *ctxt)
 	 * now restore the descriptor tables to their proper values
 	 * ltr is done i fix_processor_context().
 	 */
+#ifdef CONFIG_X86_32
 	load_gdt(&ctxt->gdt);
 	load_idt(&ctxt->idt);
+#else
+/* CONFIG_X86_64 */
+	load_gdt((const struct desc_ptr *)&ctxt->gdt_limit);
+	load_idt((const struct desc_ptr *)&ctxt->idt_limit);
+#endif
 
 	/*
 	 * segment registers
 	 */
+#ifdef CONFIG_X86_32
 	loadsegment(es, ctxt->es);
 	loadsegment(fs, ctxt->fs);
 	loadsegment(gs, ctxt->gs);
@@ -186,6 +219,18 @@ static void __restore_processor_state(struct saved_context *ctxt)
 	 */
 	if (boot_cpu_has(X86_FEATURE_SEP))
 		enable_sep_cpu();
+#else
+/* CONFIG_X86_64 */
+	asm volatile ("movw %0, %%ds" :: "r" (ctxt->ds));
+	asm volatile ("movw %0, %%es" :: "r" (ctxt->es));
+	asm volatile ("movw %0, %%fs" :: "r" (ctxt->fs));
+	load_gs_index(ctxt->gs);
+	asm volatile ("movw %0, %%ss" :: "r" (ctxt->ss));
+
+	wrmsrl(MSR_FS_BASE, ctxt->fs_base);
+	wrmsrl(MSR_GS_BASE, ctxt->gs_base);
+	wrmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base);
+#endif
 
 	/*
 	 * restore XCR0 for xsave capable cpu's.
@@ -194,9 +239,13 @@ static void __restore_processor_state(struct saved_context *ctxt)
 		xsetbv(XCR_XFEATURE_ENABLED_MASK, pcntxt_mask);
 
 	fix_processor_context();
+
 	do_fpu_end();
 	mtrr_ap_init();
+
+#ifdef CONFIG_X86_32
 	mcheck_init(&boot_cpu_data);
+#endif
 }
 
 /* Needed by apm.c */
@@ -204,4 +253,6 @@ void restore_processor_state(void)
 {
 	__restore_processor_state(&saved_context);
 }
+#ifdef CONFIG_X86_32
 EXPORT_SYMBOL(restore_processor_state);
+#endif
diff --git a/arch/x86/power/cpu_64.c b/arch/x86/power/cpu_64.c
index 11ea7d0ba5d9..5c8bdc002b8b 100644
--- a/arch/x86/power/cpu_64.c
+++ b/arch/x86/power/cpu_64.c
@@ -28,8 +28,6 @@ unsigned long saved_context_esi, saved_context_edi;
 unsigned long saved_context_eflags;
 #else
 /* CONFIG_X86_64 */
-static void fix_processor_context(void);
-
 struct saved_context saved_context;
 #endif
 
@@ -120,11 +118,57 @@ EXPORT_SYMBOL(save_processor_state);
 static void do_fpu_end(void)
 {
 	/*
-	 * Restore FPU regs if necessary
+	 * Restore FPU regs if necessary.
 	 */
 	kernel_fpu_end();
 }
 
+static void fix_processor_context(void)
+{
+	int cpu = smp_processor_id();
+	struct tss_struct *t = &per_cpu(init_tss, cpu);
+
+	set_tss_desc(cpu, t);	/*
+				 * This just modifies memory; should not be
+				 * necessary. But... This is necessary, because
+				 * 386 hardware has concept of busy TSS or some
+				 * similar stupidity.
+				 */
+
+#ifdef CONFIG_X86_64
+	get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
+
+	syscall_init();				/* This sets MSR_*STAR and related */
+#endif
+	load_TR_desc();				/* This does ltr */
+	load_LDT(&current->active_mm->context);	/* This does lldt */
+
+	/*
+	 * Now maybe reload the debug registers
+	 */
+	if (current->thread.debugreg7) {
+#ifdef CONFIG_X86_32
+		set_debugreg(current->thread.debugreg0, 0);
+		set_debugreg(current->thread.debugreg1, 1);
+		set_debugreg(current->thread.debugreg2, 2);
+		set_debugreg(current->thread.debugreg3, 3);
+		/* no 4 and 5 */
+		set_debugreg(current->thread.debugreg6, 6);
+		set_debugreg(current->thread.debugreg7, 7);
+#else
+		/* CONFIG_X86_64 */
+		loaddebug(&current->thread, 0);
+		loaddebug(&current->thread, 1);
+		loaddebug(&current->thread, 2);
+		loaddebug(&current->thread, 3);
+		/* no 4 and 5 */
+		loaddebug(&current->thread, 6);
+		loaddebug(&current->thread, 7);
+#endif
+	}
+
+}
+
 /**
  *	__restore_processor_state - restore the contents of CPU registers saved
  *		by __save_processor_state()
@@ -135,9 +179,16 @@ static void __restore_processor_state(struct saved_context *ctxt)
 	/*
 	 * control registers
 	 */
+	/* cr4 was introduced in the Pentium CPU */
+#ifdef CONFIG_X86_32
+	if (ctxt->cr4)
+		write_cr4(ctxt->cr4);
+#else
+/* CONFIG X86_64 */
 	wrmsrl(MSR_EFER, ctxt->efer);
 	write_cr8(ctxt->cr8);
 	write_cr4(ctxt->cr4);
+#endif
 	write_cr3(ctxt->cr3);
 	write_cr2(ctxt->cr2);
 	write_cr0(ctxt->cr0);
@@ -146,13 +197,31 @@ static void __restore_processor_state(struct saved_context *ctxt)
 	 * now restore the descriptor tables to their proper values
 	 * ltr is done i fix_processor_context().
 	 */
+#ifdef CONFIG_X86_32
+	load_gdt(&ctxt->gdt);
+	load_idt(&ctxt->idt);
+#else
+/* CONFIG_X86_64 */
 	load_gdt((const struct desc_ptr *)&ctxt->gdt_limit);
 	load_idt((const struct desc_ptr *)&ctxt->idt_limit);
-
+#endif
 
 	/*
 	 * segment registers
 	 */
+#ifdef CONFIG_X86_32
+	loadsegment(es, ctxt->es);
+	loadsegment(fs, ctxt->fs);
+	loadsegment(gs, ctxt->gs);
+	loadsegment(ss, ctxt->ss);
+
+	/*
+	 * sysenter MSRs
+	 */
+	if (boot_cpu_has(X86_FEATURE_SEP))
+		enable_sep_cpu();
+#else
+/* CONFIG_X86_64 */
 	asm volatile ("movw %0, %%ds" :: "r" (ctxt->ds));
 	asm volatile ("movw %0, %%es" :: "r" (ctxt->es));
 	asm volatile ("movw %0, %%fs" :: "r" (ctxt->fs));
@@ -162,6 +231,7 @@ static void __restore_processor_state(struct saved_context *ctxt)
 	wrmsrl(MSR_FS_BASE, ctxt->fs_base);
 	wrmsrl(MSR_GS_BASE, ctxt->gs_base);
 	wrmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base);
+#endif
 
 	/*
 	 * restore XCR0 for xsave capable cpu's.
@@ -173,41 +243,17 @@ static void __restore_processor_state(struct saved_context *ctxt)
 
 	do_fpu_end();
 	mtrr_ap_init();
+
+#ifdef CONFIG_X86_32
+	mcheck_init(&boot_cpu_data);
+#endif
 }
 
+/* Needed by apm.c */
 void restore_processor_state(void)
 {
 	__restore_processor_state(&saved_context);
 }
-
-static void fix_processor_context(void)
-{
-	int cpu = smp_processor_id();
-	struct tss_struct *t = &per_cpu(init_tss, cpu);
-
-	/*
-	 * This just modifies memory; should not be necessary. But... This
-	 * is necessary, because 386 hardware has concept of busy TSS or some
-	 * similar stupidity.
-	 */
-	set_tss_desc(cpu, t);
-
-	get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
-
-	syscall_init();				/* This sets MSR_*STAR and related */
-	load_TR_desc();				/* This does ltr */
-	load_LDT(&current->active_mm->context);	/* This does lldt */
-
-	/*
-	 * Now maybe reload the debug registers
-	 */
-	if (current->thread.debugreg7){
-		loaddebug(&current->thread, 0);
-		loaddebug(&current->thread, 1);
-		loaddebug(&current->thread, 2);
-		loaddebug(&current->thread, 3);
-		/* no 4 and 5 */
-		loaddebug(&current->thread, 6);
-		loaddebug(&current->thread, 7);
-	}
-}
+#ifdef CONFIG_X86_32
+EXPORT_SYMBOL(restore_processor_state);
+#endif