 arch/x86/include/asm/suspend_32.h |  8 +++++++-
 arch/x86/include/asm/suspend_64.h | 16 ++++++++++++--
 arch/x86/power/cpu.c              | 79 +++++++++++++++++++++++++++++++++++-----------------------------------
 3 files changed, 62 insertions(+), 41 deletions(-)
diff --git a/arch/x86/include/asm/suspend_32.h b/arch/x86/include/asm/suspend_32.h
index 982c325dad33..8be6afb58471 100644
--- a/arch/x86/include/asm/suspend_32.h
+++ b/arch/x86/include/asm/suspend_32.h
@@ -12,7 +12,13 @@
 
 /* image of the saved processor state */
 struct saved_context {
-	u16 es, fs, gs, ss;
+	/*
+	 * On x86_32, all segment registers, with the possible exception of
+	 * gs, are saved at kernel entry in pt_regs.
+	 */
+#ifdef CONFIG_X86_32_LAZY_GS
+	u16 gs;
+#endif
 	unsigned long cr0, cr2, cr3, cr4;
 	u64 misc_enable;
 	bool misc_enable_saved;
diff --git a/arch/x86/include/asm/suspend_64.h b/arch/x86/include/asm/suspend_64.h
index 600e9e0aea51..a7af9f53c0cb 100644
--- a/arch/x86/include/asm/suspend_64.h
+++ b/arch/x86/include/asm/suspend_64.h
@@ -20,8 +20,20 @@
  */
 struct saved_context {
 	struct pt_regs regs;
-	u16 ds, es, fs, gs, ss;
-	unsigned long gs_base, gs_kernel_base, fs_base;
+
+	/*
+	 * User CS and SS are saved in current_pt_regs().  The rest of the
+	 * segment selectors need to be saved and restored here.
+	 */
+	u16 ds, es, fs, gs;
+
+	/*
+	 * Usermode FSBASE and GSBASE may not match the fs and gs selectors,
+	 * so we save them separately.  We save the kernelmode GSBASE to
+	 * restore percpu access after resume.
+	 */
+	unsigned long kernelmode_gs_base, usermode_gs_base, fs_base;
+
 	unsigned long cr0, cr2, cr3, cr4, cr8;
 	u64 misc_enable;
 	bool misc_enable_saved;
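
A note on the new field names: SWAPGS exchanges MSR_GS_BASE and MSR_KERNEL_GS_BASE on every kernel entry and exit, so while the kernel is running (which is when this state is saved and restored), MSR_GS_BASE holds the kernel GS base used for percpu data and MSR_KERNEL_GS_BASE holds the user GS base. The toy model below only illustrates that swap; the struct name, field names, and addresses are made up, and nothing touches real MSRs:

    /* Illustration of SWAPGS bookkeeping; plain C, placeholder values only. */
    #include <stdio.h>

    struct gs_msrs { unsigned long long msr_gs_base, msr_kernel_gs_base; };

    static void swapgs(struct gs_msrs *c)
    {
            unsigned long long tmp = c->msr_gs_base;

            c->msr_gs_base = c->msr_kernel_gs_base;
            c->msr_kernel_gs_base = tmp;
    }

    int main(void)
    {
            /* Placeholder addresses: one user-looking, one kernel-looking. */
            struct gs_msrs c = {
                    .msr_gs_base        = 0x00007f1234560000ULL, /* user GS base   */
                    .msr_kernel_gs_base = 0xffff888001234000ULL, /* kernel GS base */
            };

            swapgs(&c);     /* kernel entry */
            printf("in kernel: GS_BASE=%#llx (kernelmode), KERNEL_GS_BASE=%#llx (usermode)\n",
                   c.msr_gs_base, c.msr_kernel_gs_base);

            swapgs(&c);     /* return to userspace */
            printf("in user:   GS_BASE=%#llx (usermode),  KERNEL_GS_BASE=%#llx (kernelmode)\n",
                   c.msr_gs_base, c.msr_kernel_gs_base);
            return 0;
    }
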
diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
index 033c61e6891b..36a28eddb435 100644
--- a/arch/x86/power/cpu.c
+++ b/arch/x86/power/cpu.c
@@ -99,22 +99,18 @@ static void __save_processor_state(struct saved_context *ctxt)
 	/*
 	 * segment registers
 	 */
-#ifdef CONFIG_X86_32
-	savesegment(es, ctxt->es);
-	savesegment(fs, ctxt->fs);
+#ifdef CONFIG_X86_32_LAZY_GS
 	savesegment(gs, ctxt->gs);
-	savesegment(ss, ctxt->ss);
-#else
-/* CONFIG_X86_64 */
-	asm volatile ("movw %%ds, %0" : "=m" (ctxt->ds));
-	asm volatile ("movw %%es, %0" : "=m" (ctxt->es));
-	asm volatile ("movw %%fs, %0" : "=m" (ctxt->fs));
-	asm volatile ("movw %%gs, %0" : "=m" (ctxt->gs));
-	asm volatile ("movw %%ss, %0" : "=m" (ctxt->ss));
+#endif
+#ifdef CONFIG_X86_64
+	savesegment(gs, ctxt->gs);
+	savesegment(fs, ctxt->fs);
+	savesegment(ds, ctxt->ds);
+	savesegment(es, ctxt->es);
 
 	rdmsrl(MSR_FS_BASE, ctxt->fs_base);
-	rdmsrl(MSR_GS_BASE, ctxt->gs_base);
-	rdmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base);
+	rdmsrl(MSR_GS_BASE, ctxt->kernelmode_gs_base);
+	rdmsrl(MSR_KERNEL_GS_BASE, ctxt->usermode_gs_base);
 	mtrr_save_fixed_ranges(NULL);
 
 	rdmsrl(MSR_EFER, ctxt->efer);
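
For readers unfamiliar with savesegment(): it is essentially a single mov that copies the 16-bit selector out of a segment register (the kernel's real macro also adds a "memory" clobber, and the companion loadsegment() wraps the selector write with an exception fixup). The selector says nothing about the hidden base, which is why the FSBASE/GSBASE MSRs above are saved separately. A standalone sketch, using a hypothetical my_savesegment() name, runnable as an ordinary x86 userspace program:

    /* Rough userspace equivalent of what savesegment() boils down to. */
    #include <stdio.h>

    #define my_savesegment(seg, value) \
            asm volatile("movw %%" #seg ", %0" : "=r"(value) : : "memory")

    int main(void)
    {
            unsigned short cs, ss, ds, gs;

            my_savesegment(cs, cs);
            my_savesegment(ss, ss);
            my_savesegment(ds, ds);
            my_savesegment(gs, gs);

            /* On x86_64 Linux userspace this typically shows cs=0x33, ss=0x2b. */
            printf("cs=%#hx ss=%#hx ds=%#hx gs=%#hx\n", cs, ss, ds, gs);
            return 0;
    }
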
@@ -189,9 +185,12 @@ static void fix_processor_context(void)
 }
 
 /**
 *	__restore_processor_state - restore the contents of CPU registers saved
 *		by __save_processor_state()
 *	@ctxt - structure to load the registers contents from
+ *
+ * The asm code that gets us here will have restored a usable GDT, although
+ * it will be pointing to the wrong alias.
 */
 static void notrace __restore_processor_state(struct saved_context *ctxt)
 {
@@ -214,46 +213,50 @@ static void notrace __restore_processor_state(struct saved_context *ctxt)
 	write_cr2(ctxt->cr2);
 	write_cr0(ctxt->cr0);
 
+	/* Restore the IDT. */
+	load_idt(&ctxt->idt);
+
 	/*
-	 * now restore the descriptor tables to their proper values
-	 * ltr is done i fix_processor_context().
+	 * Just in case the asm code got us here with the SS, DS, or ES
+	 * out of sync with the GDT, update them.
 	 */
-	load_idt(&ctxt->idt);
+	loadsegment(ss, __KERNEL_DS);
+	loadsegment(ds, __USER_DS);
+	loadsegment(es, __USER_DS);
 
-#ifdef CONFIG_X86_64
 	/*
-	 * We need GSBASE restored before percpu access can work.
-	 * percpu access can happen in exception handlers or in complicated
-	 * helpers like load_gs_index().
+	 * Restore percpu access.  Percpu access can happen in exception
+	 * handlers or in complicated helpers like load_gs_index().
 	 */
-	wrmsrl(MSR_GS_BASE, ctxt->gs_base);
+#ifdef CONFIG_X86_64
+	wrmsrl(MSR_GS_BASE, ctxt->kernelmode_gs_base);
+#else
+	loadsegment(fs, __KERNEL_PERCPU);
+	loadsegment(gs, __KERNEL_STACK_CANARY);
 #endif
 
+	/* Restore the TSS, RO GDT, LDT, and usermode-relevant MSRs. */
 	fix_processor_context();
 
 	/*
-	 * Restore segment registers.  This happens after restoring the GDT
-	 * and LDT, which happen in fix_processor_context().
+	 * Now that we have descriptor tables fully restored and working
+	 * exception handling, restore the usermode segments.
 	 */
-#ifdef CONFIG_X86_32
+#ifdef CONFIG_X86_64
+	loadsegment(ds, ctxt->ds);
 	loadsegment(es, ctxt->es);
 	loadsegment(fs, ctxt->fs);
-	loadsegment(gs, ctxt->gs);
-	loadsegment(ss, ctxt->ss);
-#else
-/* CONFIG_X86_64 */
-	asm volatile ("movw %0, %%ds" :: "r" (ctxt->ds));
-	asm volatile ("movw %0, %%es" :: "r" (ctxt->es));
-	asm volatile ("movw %0, %%fs" :: "r" (ctxt->fs));
 	load_gs_index(ctxt->gs);
-	asm volatile ("movw %0, %%ss" :: "r" (ctxt->ss));
 
 	/*
-	 * Restore FSBASE and user GSBASE after reloading the respective
-	 * segment selectors.
+	 * Restore FSBASE and GSBASE after restoring the selectors, since
+	 * restoring the selectors clobbers the bases.  Keep in mind
+	 * that MSR_KERNEL_GS_BASE is horribly misnamed.
 	 */
 	wrmsrl(MSR_FS_BASE, ctxt->fs_base);
-	wrmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base);
+	wrmsrl(MSR_KERNEL_GS_BASE, ctxt->usermode_gs_base);
+#elif defined(CONFIG_X86_32_LAZY_GS)
+	loadsegment(gs, ctxt->gs);
 #endif
 
 	do_fpu_end();
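
The "restoring the selectors clobbers the bases" comment is easy to see from userspace: a write to a segment register reloads its hidden base from the (flat, base-0) descriptor, discarding whatever was programmed through the MSR, which is exactly why the wrmsrl() calls above come after the selector loads. A minimal demonstration, assuming x86_64 Linux where arch_prctl(ARCH_SET_GS/ARCH_GET_GS) is available:

    /*
     * x86_64 Linux only.  Program GSBASE with arch_prctl(), then reload the
     * %gs selector: the selector write refetches the base from the GDT
     * descriptor, wiping the value that was set via the MSR path.
     */
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <asm/prctl.h>          /* ARCH_SET_GS, ARCH_GET_GS */

    int main(void)
    {
            static char area[64];
            unsigned long base = 0;
            unsigned short sel;

            /* Point GSBASE at a buffer of ours. */
            syscall(SYS_arch_prctl, ARCH_SET_GS, (unsigned long)area);
            syscall(SYS_arch_prctl, ARCH_GET_GS, &base);
            printf("GSBASE before selector write: %#lx\n", base);

            /* Reload %gs with the flat data selector currently in %ss. */
            asm volatile("movw %%ss, %0" : "=r"(sel));
            asm volatile("movw %0, %%gs" : : "r"(sel));

            syscall(SYS_arch_prctl, ARCH_GET_GS, &base);
            printf("GSBASE after  selector write: %#lx\n", base);  /* now 0 */
            return 0;
    }
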
