diff options
author | Rafael J. Wysocki <rjw@sisk.pl> | 2008-02-09 17:24:09 -0500 |
---|---|---|
committer | Thomas Gleixner <tglx@linutronix.de> | 2008-02-09 17:24:09 -0500 |
commit | ef8b03fabfbab0738dacbb6c0c38d5af91759ca1 (patch) | |
tree | c9d1be39c2d522752adbca205b12091cf24a62a4 /arch/x86/power/cpu_64.c | |
parent | c57591244a08bb441c83472f5c110151bb7c2cc6 (diff) |
x86 PM: consolidate suspend and hibernation code
Move the hibernation-specific code from arch/x86/power/suspend_64.c
to a separate file (hibernate_64.c) and the CPU-handling code to
cpu_64.c (in line with the corresponding 32-bit code).
Simplify arch/x86/power/Makefile.
Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
Acked-by: Pavel Machek <pavel@ucw.cz>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'arch/x86/power/cpu_64.c')
-rw-r--r-- | arch/x86/power/cpu_64.c | 166 |
1 files changed, 166 insertions, 0 deletions
diff --git a/arch/x86/power/cpu_64.c b/arch/x86/power/cpu_64.c new file mode 100644 index 000000000000..66bdfb591fd8 --- /dev/null +++ b/arch/x86/power/cpu_64.c | |||
@@ -0,0 +1,166 @@ | |||
1 | /* | ||
2 | * Suspend and hibernation support for x86-64 | ||
3 | * | ||
4 | * Distribute under GPLv2 | ||
5 | * | ||
6 | * Copyright (c) 2007 Rafael J. Wysocki <rjw@sisk.pl> | ||
7 | * Copyright (c) 2002 Pavel Machek <pavel@suse.cz> | ||
8 | * Copyright (c) 2001 Patrick Mochel <mochel@osdl.org> | ||
9 | */ | ||
10 | |||
11 | #include <linux/smp.h> | ||
12 | #include <linux/suspend.h> | ||
13 | #include <asm/proto.h> | ||
14 | #include <asm/page.h> | ||
15 | #include <asm/pgtable.h> | ||
16 | #include <asm/mtrr.h> | ||
17 | |||
/* Defined below; declared early because __restore_processor_state() calls it. */
static void fix_processor_context(void);

/*
 * CPU register state captured by save_processor_state() and written back
 * by restore_processor_state() across suspend/hibernation.
 */
struct saved_context saved_context;
21 | |||
/**
 * __save_processor_state - save CPU registers before creating a
 * hibernation image and before restoring the memory state from it
 * @ctxt: structure to store the registers contents in
 *
 * NOTE: If there is a CPU register the modification of which by the
 * boot kernel (ie. the kernel used for loading the hibernation image)
 * might affect the operations of the restored target kernel (ie. the one
 * saved in the hibernation image), then its contents must be saved by this
 * function.  In other words, if kernel A is hibernated and different
 * kernel B is used for loading the hibernation image into memory, the
 * kernel A's __save_processor_state() function must save all registers
 * needed by kernel A, so that it can operate correctly after the resume
 * regardless of what kernel B does in the meantime.
 */
static void __save_processor_state(struct saved_context *ctxt)
{
	/* Take the FPU; the matching kernel_fpu_end() runs in do_fpu_end(). */
	kernel_fpu_begin();

	/*
	 * descriptor tables
	 */
	store_gdt((struct desc_ptr *)&ctxt->gdt_limit);
	store_idt((struct desc_ptr *)&ctxt->idt_limit);
	store_tr(ctxt->tr);

	/* XMM0..XMM15 should be handled by kernel_fpu_begin(). */
	/*
	 * segment registers
	 */
	asm volatile ("movw %%ds, %0" : "=m" (ctxt->ds));
	asm volatile ("movw %%es, %0" : "=m" (ctxt->es));
	asm volatile ("movw %%fs, %0" : "=m" (ctxt->fs));
	asm volatile ("movw %%gs, %0" : "=m" (ctxt->gs));
	asm volatile ("movw %%ss, %0" : "=m" (ctxt->ss));

	/*
	 * 64-bit FS/GS bases live in MSRs, not in the selectors saved above,
	 * so they must be captured separately.
	 */
	rdmsrl(MSR_FS_BASE, ctxt->fs_base);
	rdmsrl(MSR_GS_BASE, ctxt->gs_base);
	rdmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base);
	mtrr_save_fixed_ranges(NULL);

	/*
	 * control registers
	 */
	rdmsrl(MSR_EFER, ctxt->efer);
	ctxt->cr0 = read_cr0();
	ctxt->cr2 = read_cr2();
	ctxt->cr3 = read_cr3();
	ctxt->cr4 = read_cr4();
	ctxt->cr8 = read_cr8();
}
73 | |||
/* Entry point for the suspend/hibernation core: capture this CPU's state. */
void save_processor_state(void)
{
	__save_processor_state(&saved_context);
}
78 | |||
/* Counterpart of the kernel_fpu_begin() in __save_processor_state(). */
static void do_fpu_end(void)
{
	/*
	 * Restore FPU regs if necessary
	 */
	kernel_fpu_end();
}
86 | |||
/**
 * __restore_processor_state - restore the contents of CPU registers saved
 * by __save_processor_state()
 * @ctxt: structure to load the registers contents from
 *
 * The restore order is deliberate: control registers and EFER first, then
 * the descriptor tables, then the segment registers and their base MSRs.
 */
static void __restore_processor_state(struct saved_context *ctxt)
{
	/*
	 * control registers
	 */
	wrmsrl(MSR_EFER, ctxt->efer);
	write_cr8(ctxt->cr8);
	write_cr4(ctxt->cr4);
	write_cr3(ctxt->cr3);
	write_cr2(ctxt->cr2);
	write_cr0(ctxt->cr0);

	/*
	 * now restore the descriptor tables to their proper values
	 * ltr is done in fix_processor_context().
	 */
	load_gdt((const struct desc_ptr *)&ctxt->gdt_limit);
	load_idt((const struct desc_ptr *)&ctxt->idt_limit);


	/*
	 * segment registers
	 * (%gs goes through load_gs_index() rather than a plain mov)
	 */
	asm volatile ("movw %0, %%ds" :: "r" (ctxt->ds));
	asm volatile ("movw %0, %%es" :: "r" (ctxt->es));
	asm volatile ("movw %0, %%fs" :: "r" (ctxt->fs));
	load_gs_index(ctxt->gs);
	asm volatile ("movw %0, %%ss" :: "r" (ctxt->ss));

	/* Selector loads reset the 64-bit bases; rewrite them from the MSRs. */
	wrmsrl(MSR_FS_BASE, ctxt->fs_base);
	wrmsrl(MSR_GS_BASE, ctxt->gs_base);
	wrmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base);

	/* Reload TSS, LDT, syscall MSRs and debug registers. */
	fix_processor_context();

	do_fpu_end();
	mtrr_ap_init();
}
130 | |||
/* Entry point for the suspend/hibernation core: restore this CPU's state. */
void restore_processor_state(void)
{
	__restore_processor_state(&saved_context);
}
135 | |||
/*
 * Re-establish per-CPU context that cannot simply be memcpy'd back:
 * the TSS descriptor (which the CPU marks busy), the syscall MSRs,
 * the task register, the LDT and, if in use, the debug registers.
 */
static void fix_processor_context(void)
{
	int cpu = smp_processor_id();
	struct tss_struct *t = &per_cpu(init_tss, cpu);

	/*
	 * This just modifies memory; should not be necessary. But... This
	 * is necessary, because 386 hardware has concept of busy TSS or some
	 * similar stupidity.
	 */
	set_tss_desc(cpu, t);

	/*
	 * Force the TSS descriptor type back to "available" (9) so that the
	 * ltr in load_TR_desc() below does not fault on a busy descriptor.
	 */
	get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;

	syscall_init();                         /* This sets MSR_*STAR and related */
	load_TR_desc();                         /* This does ltr */
	load_LDT(&current->active_mm->context); /* This does lldt */

	/*
	 * Now maybe reload the debug registers
	 * (DR7 non-zero means hardware breakpoints were armed before suspend)
	 */
	if (current->thread.debugreg7){
		loaddebug(&current->thread, 0);
		loaddebug(&current->thread, 1);
		loaddebug(&current->thread, 2);
		loaddebug(&current->thread, 3);
		/* no 4 and 5 */
		loaddebug(&current->thread, 6);
		loaddebug(&current->thread, 7);
	}
}