about · summary · refs · log · tree · commit · diff · stats
path: root/arch/x86_64/kernel/suspend.c
diff options
context:
space:
mode:
authorPavel Machek <pavel@ucw.cz>2005-06-25 17:55:14 -0400
committerLinus Torvalds <torvalds@ppc970.osdl.org>2005-06-25 19:24:33 -0400
commit8d783b3e02002bce8cf9d4e4a82922ee7e59b1e5 (patch)
tree75c95b03d715caa1f5971b5c2182635618bdba0c /arch/x86_64/kernel/suspend.c
parentc61978b30322c83a94d7e4857fa5b9996b7d7931 (diff)
[PATCH] swsusp: clean assembly parts
This patch fixes register saving so that each register is only saved once, and adds missing saving of %cr8 on x86-64. Some reordering so that save/restore is more logical/safer (segment registers should be restored after gdt). Signed-off-by: Pavel Machek <pavel@suse.cz> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'arch/x86_64/kernel/suspend.c')
-rw-r--r--arch/x86_64/kernel/suspend.c18
1 files changed, 9 insertions, 9 deletions
diff --git a/arch/x86_64/kernel/suspend.c b/arch/x86_64/kernel/suspend.c
index ebaa1e37d657..6c0f402e3a88 100644
--- a/arch/x86_64/kernel/suspend.c
+++ b/arch/x86_64/kernel/suspend.c
@@ -44,7 +44,6 @@ void __save_processor_state(struct saved_context *ctxt)
44 */ 44 */
45 asm volatile ("sgdt %0" : "=m" (ctxt->gdt_limit)); 45 asm volatile ("sgdt %0" : "=m" (ctxt->gdt_limit));
46 asm volatile ("sidt %0" : "=m" (ctxt->idt_limit)); 46 asm volatile ("sidt %0" : "=m" (ctxt->idt_limit));
47 asm volatile ("sldt %0" : "=m" (ctxt->ldt));
48 asm volatile ("str %0" : "=m" (ctxt->tr)); 47 asm volatile ("str %0" : "=m" (ctxt->tr));
49 48
50 /* XMM0..XMM15 should be handled by kernel_fpu_begin(). */ 49 /* XMM0..XMM15 should be handled by kernel_fpu_begin(). */
@@ -69,6 +68,7 @@ void __save_processor_state(struct saved_context *ctxt)
69 asm volatile ("movq %%cr2, %0" : "=r" (ctxt->cr2)); 68 asm volatile ("movq %%cr2, %0" : "=r" (ctxt->cr2));
70 asm volatile ("movq %%cr3, %0" : "=r" (ctxt->cr3)); 69 asm volatile ("movq %%cr3, %0" : "=r" (ctxt->cr3));
71 asm volatile ("movq %%cr4, %0" : "=r" (ctxt->cr4)); 70 asm volatile ("movq %%cr4, %0" : "=r" (ctxt->cr4));
71 asm volatile ("movq %%cr8, %0" : "=r" (ctxt->cr8));
72} 72}
73 73
74void save_processor_state(void) 74void save_processor_state(void)
@@ -90,12 +90,20 @@ void __restore_processor_state(struct saved_context *ctxt)
90 /* 90 /*
91 * control registers 91 * control registers
92 */ 92 */
93 asm volatile ("movq %0, %%cr8" :: "r" (ctxt->cr8));
93 asm volatile ("movq %0, %%cr4" :: "r" (ctxt->cr4)); 94 asm volatile ("movq %0, %%cr4" :: "r" (ctxt->cr4));
94 asm volatile ("movq %0, %%cr3" :: "r" (ctxt->cr3)); 95 asm volatile ("movq %0, %%cr3" :: "r" (ctxt->cr3));
95 asm volatile ("movq %0, %%cr2" :: "r" (ctxt->cr2)); 96 asm volatile ("movq %0, %%cr2" :: "r" (ctxt->cr2));
96 asm volatile ("movq %0, %%cr0" :: "r" (ctxt->cr0)); 97 asm volatile ("movq %0, %%cr0" :: "r" (ctxt->cr0));
97 98
98 /* 99 /*
100 * now restore the descriptor tables to their proper values
101 * ltr is done in fix_processor_context().
102 */
103 asm volatile ("lgdt %0" :: "m" (ctxt->gdt_limit));
104 asm volatile ("lidt %0" :: "m" (ctxt->idt_limit));
105
106 /*
99 * segment registers 107 * segment registers
100 */ 108 */
101 asm volatile ("movw %0, %%ds" :: "r" (ctxt->ds)); 109 asm volatile ("movw %0, %%ds" :: "r" (ctxt->ds));
@@ -108,14 +116,6 @@ void __restore_processor_state(struct saved_context *ctxt)
108 wrmsrl(MSR_GS_BASE, ctxt->gs_base); 116 wrmsrl(MSR_GS_BASE, ctxt->gs_base);
109 wrmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base); 117 wrmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base);
110 118
111 /*
112 * now restore the descriptor tables to their proper values
113 * ltr is done in fix_processor_context().
114 */
115 asm volatile ("lgdt %0" :: "m" (ctxt->gdt_limit));
116 asm volatile ("lidt %0" :: "m" (ctxt->idt_limit));
117 asm volatile ("lldt %0" :: "m" (ctxt->ldt));
118
119 fix_processor_context(); 119 fix_processor_context();
120 120
121 do_fpu_end(); 121 do_fpu_end();