 arch/x86/power/cpu_32.c | 48 ++++++++++++++++++++++++++++++++++++++++++++++++
 arch/x86/power/cpu_64.c | 29 ++++++++++++++++++++++++++++-
 2 files changed, 76 insertions(+), 1 deletion(-)
diff --git a/arch/x86/power/cpu_32.c b/arch/x86/power/cpu_32.c
index de1a86b2cffa..294e78baff75 100644
--- a/arch/x86/power/cpu_32.c
+++ b/arch/x86/power/cpu_32.c
@@ -32,25 +32,65 @@ static void fix_processor_context(void);
 struct saved_context saved_context;
 #endif
 
+/**
+ *	__save_processor_state - save CPU registers before creating a
+ *		hibernation image and before restoring the memory state from it
+ *	@ctxt - structure to store the registers contents in
+ *
+ *	NOTE: If there is a CPU register the modification of which by the
+ *	boot kernel (ie. the kernel used for loading the hibernation image)
+ *	might affect the operations of the restored target kernel (ie. the one
+ *	saved in the hibernation image), then its contents must be saved by this
+ *	function.  In other words, if kernel A is hibernated and different
+ *	kernel B is used for loading the hibernation image into memory, the
+ *	kernel A's __save_processor_state() function must save all registers
+ *	needed by kernel A, so that it can operate correctly after the resume
+ *	regardless of what kernel B does in the meantime.
+ */
 static void __save_processor_state(struct saved_context *ctxt)
 {
+#ifdef CONFIG_X86_32
 	mtrr_save_fixed_ranges(NULL);
+#endif
 	kernel_fpu_begin();
 
 	/*
 	 * descriptor tables
 	 */
+#ifdef CONFIG_X86_32
 	store_gdt(&ctxt->gdt);
 	store_idt(&ctxt->idt);
+#else
+/* CONFIG_X86_64 */
+	store_gdt((struct desc_ptr *)&ctxt->gdt_limit);
+	store_idt((struct desc_ptr *)&ctxt->idt_limit);
+#endif
 	store_tr(ctxt->tr);
 
+	/* XMM0..XMM15 should be handled by kernel_fpu_begin(). */
 	/*
 	 * segment registers
 	 */
+#ifdef CONFIG_X86_32
 	savesegment(es, ctxt->es);
 	savesegment(fs, ctxt->fs);
 	savesegment(gs, ctxt->gs);
 	savesegment(ss, ctxt->ss);
+#else
+/* CONFIG_X86_64 */
+	asm volatile ("movw %%ds, %0" : "=m" (ctxt->ds));
+	asm volatile ("movw %%es, %0" : "=m" (ctxt->es));
+	asm volatile ("movw %%fs, %0" : "=m" (ctxt->fs));
+	asm volatile ("movw %%gs, %0" : "=m" (ctxt->gs));
+	asm volatile ("movw %%ss, %0" : "=m" (ctxt->ss));
+
+	rdmsrl(MSR_FS_BASE, ctxt->fs_base);
+	rdmsrl(MSR_GS_BASE, ctxt->gs_base);
+	rdmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base);
+	mtrr_save_fixed_ranges(NULL);
+
+	rdmsrl(MSR_EFER, ctxt->efer);
+#endif
 
 	/*
 	 * control registers
@@ -58,7 +98,13 @@ static void __save_processor_state(struct saved_context *ctxt)
 	ctxt->cr0 = read_cr0();
 	ctxt->cr2 = read_cr2();
 	ctxt->cr3 = read_cr3();
+#ifdef CONFIG_X86_32
 	ctxt->cr4 = read_cr4_safe();
+#else
+/* CONFIG_X86_64 */
+	ctxt->cr4 = read_cr4();
+	ctxt->cr8 = read_cr8();
+#endif
 }
 
 /* Needed by apm.c */
@@ -66,7 +112,9 @@ void save_processor_state(void)
 {
 	__save_processor_state(&saved_context);
 }
+#ifdef CONFIG_X86_32
 EXPORT_SYMBOL(save_processor_state);
+#endif
 
 static void do_fpu_end(void)
 {
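For readability, here is a minimal sketch of what the unified __save_processor_state() reduces to once the preprocessor picks the CONFIG_X86_64 branches. It simply restates the patched function above with one side of each #ifdef kept; it introduces no code that is not already in the hunks:

static void __save_processor_state(struct saved_context *ctxt)
{
	kernel_fpu_begin();

	/* On 64-bit, the descriptor tables are saved as limit/base pairs. */
	store_gdt((struct desc_ptr *)&ctxt->gdt_limit);
	store_idt((struct desc_ptr *)&ctxt->idt_limit);
	store_tr(ctxt->tr);

	/* XMM0..XMM15 should be handled by kernel_fpu_begin(). */

	/* Segment selectors are read with plain movw instructions. */
	asm volatile ("movw %%ds, %0" : "=m" (ctxt->ds));
	asm volatile ("movw %%es, %0" : "=m" (ctxt->es));
	asm volatile ("movw %%fs, %0" : "=m" (ctxt->fs));
	asm volatile ("movw %%gs, %0" : "=m" (ctxt->gs));
	asm volatile ("movw %%ss, %0" : "=m" (ctxt->ss));

	/* The fs/gs segment bases and EFER live in MSRs on x86_64. */
	rdmsrl(MSR_FS_BASE, ctxt->fs_base);
	rdmsrl(MSR_GS_BASE, ctxt->gs_base);
	rdmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base);
	mtrr_save_fixed_ranges(NULL);
	rdmsrl(MSR_EFER, ctxt->efer);

	/* Control registers; CR8 (the task-priority register) exists only
	 * on 64-bit, and read_cr4() needs no _safe variant there because
	 * CR4 is always present. */
	ctxt->cr0 = read_cr0();
	ctxt->cr2 = read_cr2();
	ctxt->cr3 = read_cr3();
	ctxt->cr4 = read_cr4();
	ctxt->cr8 = read_cr8();
}

On the CONFIG_X86_32 side the same skeleton instead calls mtrr_save_fixed_ranges() at the top of the function, saves selectors via savesegment(), and reads CR4 through read_cr4_safe(), since very old 32-bit CPUs lack CR4 entirely.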
diff --git a/arch/x86/power/cpu_64.c b/arch/x86/power/cpu_64.c
index 6ce0eca847c3..11ea7d0ba5d9 100644
--- a/arch/x86/power/cpu_64.c
+++ b/arch/x86/power/cpu_64.c
@@ -50,19 +50,35 @@ struct saved_context saved_context;
  */
 static void __save_processor_state(struct saved_context *ctxt)
 {
+#ifdef CONFIG_X86_32
+	mtrr_save_fixed_ranges(NULL);
+#endif
 	kernel_fpu_begin();
 
 	/*
 	 * descriptor tables
 	 */
+#ifdef CONFIG_X86_32
+	store_gdt(&ctxt->gdt);
+	store_idt(&ctxt->idt);
+#else
+/* CONFIG_X86_64 */
 	store_gdt((struct desc_ptr *)&ctxt->gdt_limit);
 	store_idt((struct desc_ptr *)&ctxt->idt_limit);
+#endif
 	store_tr(ctxt->tr);
 
 	/* XMM0..XMM15 should be handled by kernel_fpu_begin(). */
 	/*
 	 * segment registers
 	 */
+#ifdef CONFIG_X86_32
+	savesegment(es, ctxt->es);
+	savesegment(fs, ctxt->fs);
+	savesegment(gs, ctxt->gs);
+	savesegment(ss, ctxt->ss);
+#else
+/* CONFIG_X86_64 */
 	asm volatile ("movw %%ds, %0" : "=m" (ctxt->ds));
 	asm volatile ("movw %%es, %0" : "=m" (ctxt->es));
 	asm volatile ("movw %%fs, %0" : "=m" (ctxt->fs));
@@ -74,21 +90,32 @@ static void __save_processor_state(struct saved_context *ctxt)
 	rdmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base);
 	mtrr_save_fixed_ranges(NULL);
 
+	rdmsrl(MSR_EFER, ctxt->efer);
+#endif
+
 	/*
 	 * control registers
 	 */
-	rdmsrl(MSR_EFER, ctxt->efer);
 	ctxt->cr0 = read_cr0();
 	ctxt->cr2 = read_cr2();
 	ctxt->cr3 = read_cr3();
+#ifdef CONFIG_X86_32
+	ctxt->cr4 = read_cr4_safe();
+#else
+/* CONFIG_X86_64 */
 	ctxt->cr4 = read_cr4();
 	ctxt->cr8 = read_cr8();
+#endif
 }
 
+/* Needed by apm.c */
 void save_processor_state(void)
 {
 	__save_processor_state(&saved_context);
 }
+#ifdef CONFIG_X86_32
+EXPORT_SYMBOL(save_processor_state);
+#endif
 
 static void do_fpu_end(void)
 {
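A side note on the asymmetry these #ifdefs preserve: the 32-bit branch saves selectors through the savesegment() macro while the 64-bit branch spells out movw inline asm. For reference, savesegment() in this era of the tree is defined approximately as below; this is an assumption quoted from memory of arch/x86/include/asm/system.h, not part of this patch:

/* Approximate contemporary definition of savesegment(); verify against
 * arch/x86/include/asm/system.h before relying on the exact constraints. */
#define savesegment(seg, value)				\
	asm("mov %%" #seg ",%0" : "=r" (value) : : "memory")

Both spellings read the same 16-bit selector into the saved_context field; this patch deliberately leaves each form unchanged and touches only the surrounding #ifdef structure.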