author		Rafael J. Wysocki <rjw@sisk.pl>		2008-02-09 17:24:09 -0500
committer	Thomas Gleixner <tglx@linutronix.de>	2008-02-09 17:24:09 -0500
commit		c57591244a08bb441c83472f5c110151bb7c2cc6 (patch)
tree		adfb539d83e67079e8bb26085b12f9d33f0120ee /arch/x86/power/hibernate_32.c
parent		cf7700fe24301df2c8d3636cf40784651c098207 (diff)
x86 PM: rename 32-bit files in arch/x86/power
Rename cpu.c, suspend.c and swsusp.S in arch/x86/power to cpu_32.c,
hibernate_32.c and hibernate_asm_32.S, respectively, and update the
purpose and copyright information in these files.
Update the Makefile in arch/x86/power to reflect the above changes.
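For reference, after the rename the Makefile presumably reduces to something like the sketch below; the exact object lists are an assumption, not quoted from this commit:

	# arch/x86/power/Makefile (sketch; contents assumed)
	obj-$(CONFIG_PM)		+= cpu_32.o
	obj-$(CONFIG_HIBERNATION)	+= hibernate_32.o hibernate_asm_32.o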
Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
Acked-by: Pavel Machek <pavel@ucw.cz>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'arch/x86/power/hibernate_32.c')
-rw-r--r--	arch/x86/power/hibernate_32.c	172
1 file changed, 172 insertions(+), 0 deletions(-)
diff --git a/arch/x86/power/hibernate_32.c b/arch/x86/power/hibernate_32.c
new file mode 100644
index 000000000000..5080c377ef12
--- /dev/null
+++ b/arch/x86/power/hibernate_32.c
@@ -0,0 +1,172 @@
/*
 * Hibernation support specific for i386 - temporary page tables
 *
 * Distribute under GPLv2
 *
 * Copyright (c) 2006 Rafael J. Wysocki <rjw@sisk.pl>
 */

#include <linux/suspend.h>
#include <linux/bootmem.h>

#include <asm/system.h>
#include <asm/page.h>
#include <asm/pgtable.h>
/* Defined in arch/x86/power/hibernate_asm_32.S */
extern int restore_image(void);

/* References to section boundaries */
extern const void __nosave_begin, __nosave_end;

/* Pointer to the temporary resume page tables */
pgd_t *resume_pg_dir;

/*
 * The following three functions are based on the analogous code in
 * arch/i386/mm/init.c
 */

/*
 * Create a middle page table on a resume-safe page and put a pointer to it in
 * the given global directory entry.  This only returns the pgd entry
 * in non-PAE compilation mode, since the middle layer is folded.
 */
static pmd_t *resume_one_md_table_init(pgd_t *pgd)
{
	pud_t *pud;
	pmd_t *pmd_table;

#ifdef CONFIG_X86_PAE
	pmd_table = (pmd_t *)get_safe_page(GFP_ATOMIC);
	if (!pmd_table)
		return NULL;

	set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
	pud = pud_offset(pgd, 0);

	BUG_ON(pmd_table != pmd_offset(pud, 0));
#else
	pud = pud_offset(pgd, 0);
	pmd_table = pmd_offset(pud, 0);
#endif

	return pmd_table;
}
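
/*
 * Background: with PAE enabled, i386 paging has three levels -- a 4-entry
 * page directory pointer table on top, then 512-entry page middle
 * directories and 512-entry page tables -- so the pmd level must really
 * be allocated here.  Without PAE there are only two levels (a 1024-entry
 * page directory and 1024-entry page tables), and pud_offset()/pmd_offset()
 * are no-op folds onto the pgd entry itself.
 */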

/*
 * Create a page table on a resume-safe page and place a pointer to it in
 * a middle page directory entry.
 */
static pte_t *resume_one_page_table_init(pmd_t *pmd)
{
	if (pmd_none(*pmd)) {
		pte_t *page_table = (pte_t *)get_safe_page(GFP_ATOMIC);
		if (!page_table)
			return NULL;

		set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));

		BUG_ON(page_table != pte_offset_kernel(pmd, 0));

		return page_table;
	}

	return pte_offset_kernel(pmd, 0);
}

/*
 * This maps the physical memory to kernel virtual address space, a total
 * of max_low_pfn pages, by creating page tables starting from address
 * PAGE_OFFSET.  The page tables are allocated out of resume-safe pages.
 */
static int resume_physical_mapping_init(pgd_t *pgd_base)
{
	unsigned long pfn;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	int pgd_idx, pmd_idx;

	pgd_idx = pgd_index(PAGE_OFFSET);
	pgd = pgd_base + pgd_idx;
	pfn = 0;

	for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
		pmd = resume_one_md_table_init(pgd);
		if (!pmd)
			return -ENOMEM;

		if (pfn >= max_low_pfn)
			continue;

		for (pmd_idx = 0; pmd_idx < PTRS_PER_PMD; pmd++, pmd_idx++) {
			if (pfn >= max_low_pfn)
				break;

			/*
			 * Map with big pages if possible, otherwise create
			 * normal page tables.
			 * NOTE: We can mark everything as executable here.
			 */
			if (cpu_has_pse) {
				set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE_EXEC));
				pfn += PTRS_PER_PTE;
			} else {
				pte_t *max_pte;

				pte = resume_one_page_table_init(pmd);
				if (!pte)
					return -ENOMEM;

				max_pte = pte + PTRS_PER_PTE;
				for (; pte < max_pte; pte++, pfn++) {
					if (pfn >= max_low_pfn)
						break;

					set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
				}
			}
		}
	}
	return 0;
}
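
/*
 * Sizing note: with PSE, each pmd entry above maps PTRS_PER_PTE small
 * pages in a single step -- 1024 * 4 KB = 4 MB per entry without PAE,
 * or 512 * 4 KB = 2 MB with PAE -- which is why pfn advances by
 * PTRS_PER_PTE in the big-page branch.
 */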

static inline void resume_init_first_level_page_table(pgd_t *pg_dir)
{
#ifdef CONFIG_X86_PAE
	int i;

	/* Init entries of the first-level page table to the zero page */
	for (i = 0; i < PTRS_PER_PGD; i++)
		set_pgd(pg_dir + i,
			__pgd(__pa(empty_zero_page) | _PAGE_PRESENT));
#endif
}

int swsusp_arch_resume(void)
{
	int error;

	resume_pg_dir = (pgd_t *)get_safe_page(GFP_ATOMIC);
	if (!resume_pg_dir)
		return -ENOMEM;

	resume_init_first_level_page_table(resume_pg_dir);
	error = resume_physical_mapping_init(resume_pg_dir);
	if (error)
		return error;

	/* We have enough memory; from this point on we cannot recover. */
	restore_image();
	return 0;
}
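
/*
 * Note on control flow: the generic hibernation core in kernel/power/
 * calls swsusp_arch_resume() once the image pages have been loaded into
 * memory.  restore_image(), implemented in assembly, switches to the
 * temporary page tables built above and copies every image page back to
 * its original location, after which control passes to the restored
 * kernel rather than returning here.
 */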

/*
 * pfn_is_nosave - check if given pfn is in the 'nosave' section
 */
int pfn_is_nosave(unsigned long pfn)
{
	unsigned long nosave_begin_pfn, nosave_end_pfn;

	nosave_begin_pfn = __pa_symbol(&__nosave_begin) >> PAGE_SHIFT;
	nosave_end_pfn = PAGE_ALIGN(__pa_symbol(&__nosave_end)) >> PAGE_SHIFT;
	return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn);
}
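
The __nosave_begin/__nosave_end markers tested above are emitted by the
kernel linker script around the .data.nosave section.  Kernel code can place
individual variables into that section with the __nosavedata attribute; a
minimal sketch (the variable name is illustrative, and __nosavedata is
assumed to come from <linux/init.h> as in kernels of this era):

	#include <linux/init.h>

	/* Lives in .data.nosave: the hibernation image neither saves nor
	 * restores this variable across a suspend/resume cycle. */
	static int resume_scratch __nosavedata;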