diff options
Diffstat (limited to 'arch/i386/power/cpu.c')
-rw-r--r-- | arch/i386/power/cpu.c | 152 |
1 file changed, 152 insertions, 0 deletions
diff --git a/arch/i386/power/cpu.c b/arch/i386/power/cpu.c new file mode 100644 index 000000000000..cf337c673d92 --- /dev/null +++ b/arch/i386/power/cpu.c | |||
@@ -0,0 +1,152 @@ | |||
1 | /* | ||
2 | * Suspend support specific for i386. | ||
3 | * | ||
4 | * Distribute under GPLv2 | ||
5 | * | ||
6 | * Copyright (c) 2002 Pavel Machek <pavel@suse.cz> | ||
7 | * Copyright (c) 2001 Patrick Mochel <mochel@osdl.org> | ||
8 | */ | ||
9 | |||
10 | #include <linux/config.h> | ||
11 | #include <linux/kernel.h> | ||
12 | #include <linux/module.h> | ||
13 | #include <linux/init.h> | ||
14 | #include <linux/types.h> | ||
15 | #include <linux/spinlock.h> | ||
16 | #include <linux/poll.h> | ||
17 | #include <linux/delay.h> | ||
18 | #include <linux/sysrq.h> | ||
19 | #include <linux/proc_fs.h> | ||
20 | #include <linux/irq.h> | ||
21 | #include <linux/pm.h> | ||
22 | #include <linux/device.h> | ||
23 | #include <linux/suspend.h> | ||
24 | #include <linux/acpi.h> | ||
25 | #include <asm/uaccess.h> | ||
26 | #include <asm/acpi.h> | ||
27 | #include <asm/tlbflush.h> | ||
28 | |||
29 | static struct saved_context saved_context; | ||
30 | |||
31 | unsigned long saved_context_ebx; | ||
32 | unsigned long saved_context_esp, saved_context_ebp; | ||
33 | unsigned long saved_context_esi, saved_context_edi; | ||
34 | unsigned long saved_context_eflags; | ||
35 | |||
36 | extern void enable_sep_cpu(void *); | ||
37 | |||
38 | void __save_processor_state(struct saved_context *ctxt) | ||
39 | { | ||
40 | kernel_fpu_begin(); | ||
41 | |||
42 | /* | ||
43 | * descriptor tables | ||
44 | */ | ||
45 | asm volatile ("sgdt %0" : "=m" (ctxt->gdt_limit)); | ||
46 | asm volatile ("sidt %0" : "=m" (ctxt->idt_limit)); | ||
47 | asm volatile ("sldt %0" : "=m" (ctxt->ldt)); | ||
48 | asm volatile ("str %0" : "=m" (ctxt->tr)); | ||
49 | |||
50 | /* | ||
51 | * segment registers | ||
52 | */ | ||
53 | asm volatile ("movw %%es, %0" : "=m" (ctxt->es)); | ||
54 | asm volatile ("movw %%fs, %0" : "=m" (ctxt->fs)); | ||
55 | asm volatile ("movw %%gs, %0" : "=m" (ctxt->gs)); | ||
56 | asm volatile ("movw %%ss, %0" : "=m" (ctxt->ss)); | ||
57 | |||
58 | /* | ||
59 | * control registers | ||
60 | */ | ||
61 | asm volatile ("movl %%cr0, %0" : "=r" (ctxt->cr0)); | ||
62 | asm volatile ("movl %%cr2, %0" : "=r" (ctxt->cr2)); | ||
63 | asm volatile ("movl %%cr3, %0" : "=r" (ctxt->cr3)); | ||
64 | asm volatile ("movl %%cr4, %0" : "=r" (ctxt->cr4)); | ||
65 | } | ||
66 | |||
67 | void save_processor_state(void) | ||
68 | { | ||
69 | __save_processor_state(&saved_context); | ||
70 | } | ||
71 | |||
72 | static void | ||
73 | do_fpu_end(void) | ||
74 | { | ||
75 | /* restore FPU regs if necessary */ | ||
76 | /* Do it out of line so that gcc does not move cr0 load to some stupid place */ | ||
77 | kernel_fpu_end(); | ||
78 | mxcsr_feature_mask_init(); | ||
79 | } | ||
80 | |||
81 | |||
82 | static void fix_processor_context(void) | ||
83 | { | ||
84 | int cpu = smp_processor_id(); | ||
85 | struct tss_struct * t = &per_cpu(init_tss, cpu); | ||
86 | |||
87 | set_tss_desc(cpu,t); /* This just modifies memory; should not be necessary. But... This is necessary, because 386 hardware has concept of busy TSS or some similar stupidity. */ | ||
88 | per_cpu(cpu_gdt_table, cpu)[GDT_ENTRY_TSS].b &= 0xfffffdff; | ||
89 | |||
90 | load_TR_desc(); /* This does ltr */ | ||
91 | load_LDT(¤t->active_mm->context); /* This does lldt */ | ||
92 | |||
93 | /* | ||
94 | * Now maybe reload the debug registers | ||
95 | */ | ||
96 | if (current->thread.debugreg[7]){ | ||
97 | loaddebug(¤t->thread, 0); | ||
98 | loaddebug(¤t->thread, 1); | ||
99 | loaddebug(¤t->thread, 2); | ||
100 | loaddebug(¤t->thread, 3); | ||
101 | /* no 4 and 5 */ | ||
102 | loaddebug(¤t->thread, 6); | ||
103 | loaddebug(¤t->thread, 7); | ||
104 | } | ||
105 | |||
106 | } | ||
107 | |||
108 | void __restore_processor_state(struct saved_context *ctxt) | ||
109 | { | ||
110 | |||
111 | /* | ||
112 | * control registers | ||
113 | */ | ||
114 | asm volatile ("movl %0, %%cr4" :: "r" (ctxt->cr4)); | ||
115 | asm volatile ("movl %0, %%cr3" :: "r" (ctxt->cr3)); | ||
116 | asm volatile ("movl %0, %%cr2" :: "r" (ctxt->cr2)); | ||
117 | asm volatile ("movl %0, %%cr0" :: "r" (ctxt->cr0)); | ||
118 | |||
119 | /* | ||
120 | * segment registers | ||
121 | */ | ||
122 | asm volatile ("movw %0, %%es" :: "r" (ctxt->es)); | ||
123 | asm volatile ("movw %0, %%fs" :: "r" (ctxt->fs)); | ||
124 | asm volatile ("movw %0, %%gs" :: "r" (ctxt->gs)); | ||
125 | asm volatile ("movw %0, %%ss" :: "r" (ctxt->ss)); | ||
126 | |||
127 | /* | ||
128 | * now restore the descriptor tables to their proper values | ||
129 | * ltr is done i fix_processor_context(). | ||
130 | */ | ||
131 | asm volatile ("lgdt %0" :: "m" (ctxt->gdt_limit)); | ||
132 | asm volatile ("lidt %0" :: "m" (ctxt->idt_limit)); | ||
133 | asm volatile ("lldt %0" :: "m" (ctxt->ldt)); | ||
134 | |||
135 | /* | ||
136 | * sysenter MSRs | ||
137 | */ | ||
138 | if (boot_cpu_has(X86_FEATURE_SEP)) | ||
139 | enable_sep_cpu(NULL); | ||
140 | |||
141 | fix_processor_context(); | ||
142 | do_fpu_end(); | ||
143 | } | ||
144 | |||
145 | void restore_processor_state(void) | ||
146 | { | ||
147 | __restore_processor_state(&saved_context); | ||
148 | } | ||
149 | |||
150 | /* Needed by apm.c */ | ||
151 | EXPORT_SYMBOL(save_processor_state); | ||
152 | EXPORT_SYMBOL(restore_processor_state); | ||