author    | Linus Torvalds <torvalds@ppc970.osdl.org> | 2005-04-16 18:20:36 -0400
committer | Linus Torvalds <torvalds@ppc970.osdl.org> | 2005-04-16 18:20:36 -0400
commit    | 1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree      | 0bba044c4ce775e45a88a51686b5d9f90697ea9d /arch/i386/power
Linux-2.6.12-rc2 (tag: v2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.
Let it rip!
Diffstat (limited to 'arch/i386/power')
-rw-r--r-- | arch/i386/power/Makefile |   2
-rw-r--r-- | arch/i386/power/cpu.c    | 152
-rw-r--r-- | arch/i386/power/swsusp.S |  73
3 files changed, 227 insertions, 0 deletions
diff --git a/arch/i386/power/Makefile b/arch/i386/power/Makefile
new file mode 100644
index 000000000000..8cfa4e8a719d
--- /dev/null
+++ b/arch/i386/power/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_PM)		+= cpu.o
+obj-$(CONFIG_SOFTWARE_SUSPEND)	+= swsusp.o
diff --git a/arch/i386/power/cpu.c b/arch/i386/power/cpu.c
new file mode 100644
index 000000000000..cf337c673d92
--- /dev/null
+++ b/arch/i386/power/cpu.c
@@ -0,0 +1,152 @@
+/*
+ * Suspend support specific for i386.
+ *
+ * Distribute under GPLv2
+ *
+ * Copyright (c) 2002 Pavel Machek <pavel@suse.cz>
+ * Copyright (c) 2001 Patrick Mochel <mochel@osdl.org>
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/poll.h>
+#include <linux/delay.h>
+#include <linux/sysrq.h>
+#include <linux/proc_fs.h>
+#include <linux/irq.h>
+#include <linux/pm.h>
+#include <linux/device.h>
+#include <linux/suspend.h>
+#include <linux/acpi.h>
+#include <asm/uaccess.h>
+#include <asm/acpi.h>
+#include <asm/tlbflush.h>
+
+static struct saved_context saved_context;
+
+unsigned long saved_context_ebx;
+unsigned long saved_context_esp, saved_context_ebp;
+unsigned long saved_context_esi, saved_context_edi;
+unsigned long saved_context_eflags;
+
+extern void enable_sep_cpu(void *);
+
+void __save_processor_state(struct saved_context *ctxt)
+{
+	kernel_fpu_begin();
+
+	/*
+	 * descriptor tables
+	 */
+	asm volatile ("sgdt %0" : "=m" (ctxt->gdt_limit));
+	asm volatile ("sidt %0" : "=m" (ctxt->idt_limit));
+	asm volatile ("sldt %0" : "=m" (ctxt->ldt));
+	asm volatile ("str %0" : "=m" (ctxt->tr));
+
+	/*
+	 * segment registers
+	 */
+	asm volatile ("movw %%es, %0" : "=m" (ctxt->es));
+	asm volatile ("movw %%fs, %0" : "=m" (ctxt->fs));
+	asm volatile ("movw %%gs, %0" : "=m" (ctxt->gs));
+	asm volatile ("movw %%ss, %0" : "=m" (ctxt->ss));
+
+	/*
+	 * control registers
+	 */
+	asm volatile ("movl %%cr0, %0" : "=r" (ctxt->cr0));
+	asm volatile ("movl %%cr2, %0" : "=r" (ctxt->cr2));
+	asm volatile ("movl %%cr3, %0" : "=r" (ctxt->cr3));
+	asm volatile ("movl %%cr4, %0" : "=r" (ctxt->cr4));
+}
+
+void save_processor_state(void)
+{
+	__save_processor_state(&saved_context);
+}
+
+static void
+do_fpu_end(void)
+{
+	/* restore FPU regs if necessary */
+	/* Do it out of line so that gcc does not move cr0 load to some stupid place */
+	kernel_fpu_end();
+	mxcsr_feature_mask_init();
+}
+
+
+static void fix_processor_context(void)
+{
+	int cpu = smp_processor_id();
+	struct tss_struct * t = &per_cpu(init_tss, cpu);
+
+	set_tss_desc(cpu, t);	/* This just modifies memory; it should not be necessary. But... it is necessary, because 386 hardware has the concept of a busy TSS or some similar stupidity. */
+	per_cpu(cpu_gdt_table, cpu)[GDT_ENTRY_TSS].b &= 0xfffffdff;
+
+	load_TR_desc();				/* This does ltr */
+	load_LDT(&current->active_mm->context);	/* This does lldt */
+
+	/*
+	 * Now maybe reload the debug registers
+	 */
+	if (current->thread.debugreg[7]) {
+		loaddebug(&current->thread, 0);
+		loaddebug(&current->thread, 1);
+		loaddebug(&current->thread, 2);
+		loaddebug(&current->thread, 3);
+		/* no 4 and 5 */
+		loaddebug(&current->thread, 6);
+		loaddebug(&current->thread, 7);
+	}
+
+}
+
+void __restore_processor_state(struct saved_context *ctxt)
+{
+
+	/*
+	 * control registers
+	 */
+	asm volatile ("movl %0, %%cr4" :: "r" (ctxt->cr4));
+	asm volatile ("movl %0, %%cr3" :: "r" (ctxt->cr3));
+	asm volatile ("movl %0, %%cr2" :: "r" (ctxt->cr2));
+	asm volatile ("movl %0, %%cr0" :: "r" (ctxt->cr0));
+
+	/*
+	 * segment registers
+	 */
+	asm volatile ("movw %0, %%es" :: "r" (ctxt->es));
+	asm volatile ("movw %0, %%fs" :: "r" (ctxt->fs));
+	asm volatile ("movw %0, %%gs" :: "r" (ctxt->gs));
+	asm volatile ("movw %0, %%ss" :: "r" (ctxt->ss));
+
+	/*
+	 * now restore the descriptor tables to their proper values
+	 * ltr is done in fix_processor_context().
+	 */
+	asm volatile ("lgdt %0" :: "m" (ctxt->gdt_limit));
+	asm volatile ("lidt %0" :: "m" (ctxt->idt_limit));
+	asm volatile ("lldt %0" :: "m" (ctxt->ldt));
+
+	/*
+	 * sysenter MSRs
+	 */
+	if (boot_cpu_has(X86_FEATURE_SEP))
+		enable_sep_cpu(NULL);
+
+	fix_processor_context();
+	do_fpu_end();
+}
+
+void restore_processor_state(void)
+{
+	__restore_processor_state(&saved_context);
+}
+
+/* Needed by apm.c */
+EXPORT_SYMBOL(save_processor_state);
+EXPORT_SYMBOL(restore_processor_state);
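
Taken together, cpu.c gives the suspend code a matched pair of entry points: save_processor_state() captures the descriptor tables, segment registers, control registers and FPU state into saved_context, and restore_processor_state() loads them back and then repairs the TSS, LDT and debug registers via fix_processor_context(). The sketch below shows how a caller is expected to bracket a suspend/resume cycle with these two functions; take_memory_snapshot() and copy_snapshot_back() are hypothetical placeholders standing in for the swsusp core, which is not part of this diff.

/*
 * Sketch only: bracketing a suspend/resume cycle with the cpu.c entry
 * points above. take_memory_snapshot() and copy_snapshot_back() are
 * hypothetical placeholders, not functions introduced by this diff.
 */
extern void save_processor_state(void);	/* from arch/i386/power/cpu.c above */
extern void restore_processor_state(void);	/* from arch/i386/power/cpu.c above */
extern int take_memory_snapshot(void);		/* hypothetical */
extern int copy_snapshot_back(void);		/* hypothetical */

static int sketch_suspend_cycle(void)
{
	int error;

	save_processor_state();			/* GDT/IDT/LDT/TR, segments, CR0-CR4, FPU */
	error = take_memory_snapshot();		/* hypothetical: build/write the image */
	if (!error)
		error = copy_snapshot_back();	/* hypothetical: bring the image back */
	restore_processor_state();		/* reload state, fix up TSS/LDT/debug regs */

	return error;
}

The ordering matters: the processor state must be captured before the image is written and restored only after the image contents are back in place, since the restore path reloads descriptor tables that the copied-back image may have overwritten.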
diff --git a/arch/i386/power/swsusp.S b/arch/i386/power/swsusp.S
new file mode 100644
index 000000000000..c4105286ff26
--- /dev/null
+++ b/arch/i386/power/swsusp.S
@@ -0,0 +1,73 @@
+.text
+
+/* Originally gcc generated, modified by hand
+ *
+ * This may not use any stack, nor any variable that is not "NoSave":
+ *
+ * It's rewriting one kernel image with another. What is stack in the "old"
+ * image could very well be a data page in the "new" image, and overwriting
+ * your own stack under you is a bad idea.
+ */
+
+#include <linux/linkage.h>
+#include <asm/segment.h>
+#include <asm/page.h>
+#include <asm/asm_offsets.h>
+
+	.text
+
+ENTRY(swsusp_arch_suspend)
+
+	movl %esp, saved_context_esp
+	movl %ebx, saved_context_ebx
+	movl %ebp, saved_context_ebp
+	movl %esi, saved_context_esi
+	movl %edi, saved_context_edi
+	pushfl ; popl saved_context_eflags
+
+	call swsusp_save
+	ret
+
+ENTRY(swsusp_arch_resume)
+	movl $swsusp_pg_dir-__PAGE_OFFSET, %ecx
+	movl %ecx, %cr3
+
+	movl pagedir_nosave, %edx
+	.p2align 4,,7
+
+copy_loop:
+	testl %edx, %edx
+	jz done
+
+	movl pbe_address(%edx), %esi
+	movl pbe_orig_address(%edx), %edi
+
+	movl $1024, %ecx
+	rep
+	movsl
+
+	movl pbe_next(%edx), %edx
+	jmp copy_loop
+	.p2align 4,,7
+
+done:
+	/* Flush TLB, including "global" things (vmalloc) */
+	movl mmu_cr4_features, %eax
+	movl %eax, %edx
+	andl $~(1<<7), %edx;	# PGE
+	movl %edx, %cr4;	# turn off PGE
+	movl %cr3, %ecx;	# flush TLB
+	movl %ecx, %cr3
+	movl %eax, %cr4;	# turn PGE back on
+
+	movl saved_context_esp, %esp
+	movl saved_context_ebp, %ebp
+	movl saved_context_ebx, %ebx
+	movl saved_context_esi, %esi
+	movl saved_context_edi, %edi
+
+	pushl saved_context_eflags ; popfl
+
+	xorl %eax, %eax
+
+	ret
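
The swsusp_arch_resume loop above walks a NULL-terminated list of page backup entries (the pbe_address, pbe_orig_address and pbe_next offsets come from asm_offsets.h) and copies 1024 longs, i.e. one 4 KiB page, per entry with rep movsl, then flushes the TLB by toggling CR4.PGE and reloading CR3 before restoring the registers saved in swsusp_arch_suspend. A rough C rendering of that copy loop is sketched below; the struct layout and names are assumptions made for illustration, since the real definitions live in the swsusp headers rather than in this diff.

/*
 * Sketch only: C equivalent of copy_loop in swsusp_arch_resume.
 * struct pbe_sketch is an assumed layout for illustration; the assembly
 * uses the generated pbe_address/pbe_orig_address/pbe_next offsets.
 */
struct pbe_sketch {
	unsigned long address;		/* where the saved copy of the page lives */
	unsigned long orig_address;	/* where the page has to be written back */
	struct pbe_sketch *next;	/* next entry; NULL terminates the list */
};

static void copy_pages_back(struct pbe_sketch *pagedir_nosave)
{
	struct pbe_sketch *p;
	int i;

	for (p = pagedir_nosave; p; p = p->next) {
		unsigned long *src = (unsigned long *)p->address;
		unsigned long *dst = (unsigned long *)p->orig_address;

		/* 1024 longs = 4096 bytes, matching "movl $1024, %ecx; rep; movsl" */
		for (i = 0; i < 1024; i++)
			dst[i] = src[i];
	}
}

Unlike this C sketch, the assembly version deliberately avoids the stack and any non-NoSave data while it runs, because the pages it overwrites may include memory the "old" kernel is still executing from, which is exactly the constraint spelled out in the file's header comment.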