diff options
author | Thomas Gleixner <tglx@linutronix.de> | 2007-10-11 05:16:34 -0400 |
---|---|---|
committer | Thomas Gleixner <tglx@linutronix.de> | 2007-10-11 05:16:34 -0400 |
commit | 4b60eb8380a0b588a03b6052d7ac93e1964c75b8 (patch) | |
tree | 0b66caf260001230144ba3d09bde84d3bd58dc81 /arch/x86 | |
parent | 44f0257fc316ff4b33aa3438dd8d891b7d6d72b9 (diff) |
i386: move power
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86')
-rw-r--r-- | arch/x86/power/Makefile | 2 | ||||
-rw-r--r-- | arch/x86/power/cpu.c | 133 | ||||
-rw-r--r-- | arch/x86/power/suspend.c | 172 | ||||
-rw-r--r-- | arch/x86/power/swsusp.S | 78 |
4 files changed, 385 insertions, 0 deletions
diff --git a/arch/x86/power/Makefile b/arch/x86/power/Makefile new file mode 100644 index 000000000000..d764ec950065 --- /dev/null +++ b/arch/x86/power/Makefile | |||
@@ -0,0 +1,2 @@ | |||
# cpu.o: processor state save/restore, built whenever power management is on
obj-$(CONFIG_PM) += cpu.o
# swsusp.o + suspend.o: low-level hibernation (suspend-to-disk) support
obj-$(CONFIG_HIBERNATION) += swsusp.o suspend.o
diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c new file mode 100644 index 000000000000..998fd3ec0d68 --- /dev/null +++ b/arch/x86/power/cpu.c | |||
@@ -0,0 +1,133 @@ | |||
1 | /* | ||
2 | * Suspend support specific for i386. | ||
3 | * | ||
4 | * Distribute under GPLv2 | ||
5 | * | ||
6 | * Copyright (c) 2002 Pavel Machek <pavel@suse.cz> | ||
7 | * Copyright (c) 2001 Patrick Mochel <mochel@osdl.org> | ||
8 | */ | ||
9 | |||
10 | #include <linux/module.h> | ||
11 | #include <linux/suspend.h> | ||
12 | #include <asm/mtrr.h> | ||
13 | #include <asm/mce.h> | ||
14 | |||
/* Buffer for the CPU state captured by save_processor_state() */
static struct saved_context saved_context;

/*
 * General-purpose registers and EFLAGS saved around hibernation.
 * These must be ordinary (non-static) globals: the assembly code in
 * swsusp.S (swsusp_arch_suspend / restore_image) references each of
 * them by symbol name.
 */
unsigned long saved_context_ebx;
unsigned long saved_context_esp, saved_context_ebp;
unsigned long saved_context_esi, saved_context_edi;
unsigned long saved_context_eflags;
21 | |||
/*
 * __save_processor_state - capture into @ctxt the CPU state that must
 * survive suspend: fixed-range MTRRs, descriptor tables (GDT/IDT/TR),
 * data segment registers and control registers CR0-CR4.
 *
 * Also takes ownership of the FPU via kernel_fpu_begin(); the matching
 * kernel_fpu_end() is issued by do_fpu_end() on the restore path.
 */
void __save_processor_state(struct saved_context *ctxt)
{
	mtrr_save_fixed_ranges(NULL);
	/* released in do_fpu_end() during restore */
	kernel_fpu_begin();

	/*
	 * descriptor tables
	 */
	store_gdt(&ctxt->gdt);
	store_idt(&ctxt->idt);
	store_tr(ctxt->tr);

	/*
	 * segment registers
	 */
	savesegment(es, ctxt->es);
	savesegment(fs, ctxt->fs);
	savesegment(gs, ctxt->gs);
	savesegment(ss, ctxt->ss);

	/*
	 * control registers
	 */
	ctxt->cr0 = read_cr0();
	ctxt->cr2 = read_cr2();
	ctxt->cr3 = read_cr3();
	ctxt->cr4 = read_cr4();
}
50 | |||
/*
 * Save this CPU's state into the file-local saved_context buffer.
 * Exported (see bottom of file) for use by apm.c.
 */
void save_processor_state(void)
{
	__save_processor_state(&saved_context);
}
55 | |||
/* Counterpart of the kernel_fpu_begin() in __save_processor_state(). */
static void do_fpu_end(void)
{
	/*
	 * Restore FPU regs if necessary.
	 */
	kernel_fpu_end();
}
63 | |||
64 | static void fix_processor_context(void) | ||
65 | { | ||
66 | int cpu = smp_processor_id(); | ||
67 | struct tss_struct * t = &per_cpu(init_tss, cpu); | ||
68 | |||
69 | set_tss_desc(cpu,t); /* This just modifies memory; should not be necessary. But... This is necessary, because 386 hardware has concept of busy TSS or some similar stupidity. */ | ||
70 | |||
71 | load_TR_desc(); /* This does ltr */ | ||
72 | load_LDT(¤t->active_mm->context); /* This does lldt */ | ||
73 | |||
74 | /* | ||
75 | * Now maybe reload the debug registers | ||
76 | */ | ||
77 | if (current->thread.debugreg[7]){ | ||
78 | set_debugreg(current->thread.debugreg[0], 0); | ||
79 | set_debugreg(current->thread.debugreg[1], 1); | ||
80 | set_debugreg(current->thread.debugreg[2], 2); | ||
81 | set_debugreg(current->thread.debugreg[3], 3); | ||
82 | /* no 4 and 5 */ | ||
83 | set_debugreg(current->thread.debugreg[6], 6); | ||
84 | set_debugreg(current->thread.debugreg[7], 7); | ||
85 | } | ||
86 | |||
87 | } | ||
88 | |||
/*
 * __restore_processor_state - undo __save_processor_state().
 *
 * Control registers go first, then the descriptor tables, then the
 * segment registers that reference those tables.
 */
void __restore_processor_state(struct saved_context *ctxt)
{
	/*
	 * control registers
	 */
	write_cr4(ctxt->cr4);
	write_cr3(ctxt->cr3);
	write_cr2(ctxt->cr2);
	write_cr0(ctxt->cr0);

	/*
	 * now restore the descriptor tables to their proper values
	 * ltr is done in fix_processor_context().
	 */
	load_gdt(&ctxt->gdt);
	load_idt(&ctxt->idt);

	/*
	 * segment registers
	 */
	loadsegment(es, ctxt->es);
	loadsegment(fs, ctxt->fs);
	loadsegment(gs, ctxt->gs);
	loadsegment(ss, ctxt->ss);

	/*
	 * sysenter MSRs
	 */
	if (boot_cpu_has(X86_FEATURE_SEP))
		enable_sep_cpu();

	/*
	 * Reload TSS/LDT/debug registers, release the FPU, and
	 * re-initialize per-CPU MTRR and machine-check state.
	 */
	fix_processor_context();
	do_fpu_end();
	mtrr_ap_init();
	mcheck_init(&boot_cpu_data);
}
125 | |||
/*
 * Restore the CPU state previously captured by save_processor_state().
 */
void restore_processor_state(void)
{
	__restore_processor_state(&saved_context);
}

/* Needed by apm.c */
EXPORT_SYMBOL(save_processor_state);
EXPORT_SYMBOL(restore_processor_state);
diff --git a/arch/x86/power/suspend.c b/arch/x86/power/suspend.c new file mode 100644 index 000000000000..a0020b913f31 --- /dev/null +++ b/arch/x86/power/suspend.c | |||
@@ -0,0 +1,172 @@ | |||
1 | /* | ||
2 | * Suspend support specific for i386 - temporary page tables | ||
3 | * | ||
4 | * Distribute under GPLv2 | ||
5 | * | ||
6 | * Copyright (c) 2006 Rafael J. Wysocki <rjw@sisk.pl> | ||
7 | */ | ||
8 | |||
9 | #include <linux/suspend.h> | ||
10 | #include <linux/bootmem.h> | ||
11 | |||
12 | #include <asm/system.h> | ||
13 | #include <asm/page.h> | ||
14 | #include <asm/pgtable.h> | ||
15 | |||
/* Defined in arch/i386/power/swsusp.S */
extern int restore_image(void);

/* References to section boundaries (linker-provided 'nosave' markers) */
extern const void __nosave_begin, __nosave_end;

/*
 * Pointer to the temporary resume page tables; also read by symbol
 * name from the restore_image assembly in swsusp.S.
 */
pgd_t *resume_pg_dir;
24 | |||
25 | /* The following three functions are based on the analogous code in | ||
26 | * arch/i386/mm/init.c | ||
27 | */ | ||
28 | |||
/*
 * Create a middle page table on a resume-safe page and put a pointer to it in
 * the given global directory entry. This only returns the gd entry
 * in non-PAE compilation mode, since the middle layer is folded.
 *
 * Returns the pmd table for @pgd, or NULL if (in PAE mode) no
 * resume-safe page could be allocated.
 */
static pmd_t *resume_one_md_table_init(pgd_t *pgd)
{
	pud_t *pud;
	pmd_t *pmd_table;

#ifdef CONFIG_X86_PAE
	/* PAE: the pmd level is real and needs its own safe page */
	pmd_table = (pmd_t *)get_safe_page(GFP_ATOMIC);
	if (!pmd_table)
		return NULL;

	set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
	pud = pud_offset(pgd, 0);

	/* sanity: the entry we just installed must be what we walk to */
	BUG_ON(pmd_table != pmd_offset(pud, 0));
#else
	/* non-PAE: pud and pmd levels are folded into the pgd entry */
	pud = pud_offset(pgd, 0);
	pmd_table = pmd_offset(pud, 0);
#endif

	return pmd_table;
}
55 | |||
56 | /* | ||
57 | * Create a page table on a resume-safe page and place a pointer to it in | ||
58 | * a middle page directory entry. | ||
59 | */ | ||
60 | static pte_t *resume_one_page_table_init(pmd_t *pmd) | ||
61 | { | ||
62 | if (pmd_none(*pmd)) { | ||
63 | pte_t *page_table = (pte_t *)get_safe_page(GFP_ATOMIC); | ||
64 | if (!page_table) | ||
65 | return NULL; | ||
66 | |||
67 | set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE)); | ||
68 | |||
69 | BUG_ON(page_table != pte_offset_kernel(pmd, 0)); | ||
70 | |||
71 | return page_table; | ||
72 | } | ||
73 | |||
74 | return pte_offset_kernel(pmd, 0); | ||
75 | } | ||
76 | |||
/*
 * This maps the physical memory to kernel virtual address space, a total
 * of max_low_pfn pages, by creating page tables starting from address
 * PAGE_OFFSET. The page tables are allocated out of resume-safe pages.
 *
 * Returns 0 on success, or -ENOMEM if a resume-safe page could not be
 * allocated for one of the table levels.
 */
static int resume_physical_mapping_init(pgd_t *pgd_base)
{
	unsigned long pfn;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	int pgd_idx, pmd_idx;

	pgd_idx = pgd_index(PAGE_OFFSET);
	pgd = pgd_base + pgd_idx;
	pfn = 0;

	for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
		/* The pmd table is set up for every pgd slot, even past
		 * max_low_pfn (hence 'continue' below, not 'break'). */
		pmd = resume_one_md_table_init(pgd);
		if (!pmd)
			return -ENOMEM;

		if (pfn >= max_low_pfn)
			continue;

		for (pmd_idx = 0; pmd_idx < PTRS_PER_PMD; pmd++, pmd_idx++) {
			if (pfn >= max_low_pfn)
				break;

			/* Map with big pages if possible, otherwise create
			 * normal page tables.
			 * NOTE: We can mark everything as executable here
			 */
			if (cpu_has_pse) {
				/* one large page covers PTRS_PER_PTE small pages */
				set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE_EXEC));
				pfn += PTRS_PER_PTE;
			} else {
				pte_t *max_pte;

				pte = resume_one_page_table_init(pmd);
				if (!pte)
					return -ENOMEM;

				max_pte = pte + PTRS_PER_PTE;
				for (; pte < max_pte; pte++, pfn++) {
					if (pfn >= max_low_pfn)
						break;

					set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
				}
			}
		}
	}
	return 0;
}
132 | |||
/*
 * Initialize the top-level resume page directory.  In PAE mode every
 * entry is pointed (present) at the shared empty_zero_page; in non-PAE
 * builds nothing is done here.
 */
static inline void resume_init_first_level_page_table(pgd_t *pg_dir)
{
#ifdef CONFIG_X86_PAE
	int i;

	/* Init entries of the first-level page table to the zero page */
	for (i = 0; i < PTRS_PER_PGD; i++)
		set_pgd(pg_dir + i,
			__pgd(__pa(empty_zero_page) | _PAGE_PRESENT));
#endif
}
144 | |||
/*
 * swsusp_arch_resume - build the temporary resume page tables and
 * jump to the low-level image restore code.
 *
 * Returns -ENOMEM or another negative errno if the tables could not
 * be built; on success control passes to restore_image() (swsusp.S),
 * which overwrites the running kernel with the saved image.
 */
int swsusp_arch_resume(void)
{
	int error;

	/* The top-level directory must itself live on a resume-safe page */
	resume_pg_dir = (pgd_t *)get_safe_page(GFP_ATOMIC);
	if (!resume_pg_dir)
		return -ENOMEM;

	resume_init_first_level_page_table(resume_pg_dir);
	error = resume_physical_mapping_init(resume_pg_dir);
	if (error)
		return error;

	/* We have got enough memory and from now on we cannot recover */
	restore_image();
	return 0;
}
162 | |||
163 | /* | ||
164 | * pfn_is_nosave - check if given pfn is in the 'nosave' section | ||
165 | */ | ||
166 | |||
167 | int pfn_is_nosave(unsigned long pfn) | ||
168 | { | ||
169 | unsigned long nosave_begin_pfn = __pa_symbol(&__nosave_begin) >> PAGE_SHIFT; | ||
170 | unsigned long nosave_end_pfn = PAGE_ALIGN(__pa_symbol(&__nosave_end)) >> PAGE_SHIFT; | ||
171 | return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn); | ||
172 | } | ||
diff --git a/arch/x86/power/swsusp.S b/arch/x86/power/swsusp.S new file mode 100644 index 000000000000..53662e05b393 --- /dev/null +++ b/arch/x86/power/swsusp.S | |||
@@ -0,0 +1,78 @@ | |||
1 | .text | ||
2 | |||
3 | /* Originally gcc generated, modified by hand | ||
4 | * | ||
5 | * This may not use any stack, nor any variable that is not "NoSave": | ||
6 | * | ||
7 | * Its rewriting one kernel image with another. What is stack in "old" | ||
8 | * image could very well be data page in "new" image, and overwriting | ||
9 | * your own stack under you is bad idea. | ||
10 | */ | ||
11 | |||
12 | #include <linux/linkage.h> | ||
13 | #include <asm/segment.h> | ||
14 | #include <asm/page.h> | ||
15 | #include <asm/asm-offsets.h> | ||
16 | |||
17 | .text | ||
18 | |||
/*
 * swsusp_arch_suspend: stash the general-purpose registers and EFLAGS
 * into the saved_context_* variables (defined in cpu.c), then call
 * swsusp_save to snapshot memory.
 */
ENTRY(swsusp_arch_suspend)

	movl %esp, saved_context_esp
	movl %ebx, saved_context_ebx
	movl %ebp, saved_context_ebp
	movl %esi, saved_context_esi
	movl %edi, saved_context_edi
	pushfl ; popl saved_context_eflags	# EFLAGS via the stack

	call swsusp_save
	ret
30 | |||
/*
 * restore_image: switch to the temporary page tables built by
 * swsusp_arch_resume(), copy every saved page back over its original
 * location, switch back to swapper_pg_dir, flush the TLB and return
 * through the register state saved by swsusp_arch_suspend.
 */
ENTRY(restore_image)
	movl resume_pg_dir, %ecx
	subl $__PAGE_OFFSET, %ecx	# virtual -> physical for %cr3
	movl %ecx, %cr3

	# %edx walks the linked list of page backup entries (pbe)
	movl restore_pblist, %edx
	.p2align 4,,7

copy_loop:
	testl %edx, %edx
	jz done				# NULL terminates the list

	movl pbe_address(%edx), %esi		# source: saved copy
	movl pbe_orig_address(%edx), %edi	# destination: original page

	movl $1024, %ecx		# 1024 longs = one 4K page
	rep
	movsl

	movl pbe_next(%edx), %edx
	jmp copy_loop
	.p2align 4,,7

done:
	/* go back to the original page tables */
	movl $swapper_pg_dir, %ecx
	subl $__PAGE_OFFSET, %ecx
	movl %ecx, %cr3
	/* Flush TLB, including "global" things (vmalloc) */
	movl mmu_cr4_features, %eax
	movl %eax, %edx
	andl $~(1<<7), %edx;	# PGE
	movl %edx, %cr4;	# turn off PGE
	movl %cr3, %ecx;	# flush TLB
	movl %ecx, %cr3
	movl %eax, %cr4;	# turn PGE back on

	# restore the register state captured in swsusp_arch_suspend
	movl saved_context_esp, %esp
	movl saved_context_ebp, %ebp
	movl saved_context_ebx, %ebx
	movl saved_context_esi, %esi
	movl saved_context_edi, %edi

	pushl saved_context_eflags ; popfl

	xorl %eax, %eax		# return 0

	ret