Diffstat (limited to 'arch/x86_64/kernel/suspend.c')
-rw-r--r--  arch/x86_64/kernel/suspend.c | 95
1 file changed, 24 insertions(+), 71 deletions(-)
diff --git a/arch/x86_64/kernel/suspend.c b/arch/x86_64/kernel/suspend.c
index f066c6ab3618..fd2bef780882 100644
--- a/arch/x86_64/kernel/suspend.c
+++ b/arch/x86_64/kernel/suspend.c
@@ -63,13 +63,12 @@ void save_processor_state(void)
 	__save_processor_state(&saved_context);
 }
 
-static void
-do_fpu_end(void)
+static void do_fpu_end(void)
 {
-	/* restore FPU regs if necessary */
-	/* Do it out of line so that gcc does not move cr0 load to some stupid place */
+	/*
+	 * Restore FPU regs if necessary
+	 */
 	kernel_fpu_end();
-	mxcsr_feature_mask_init();
 }
 
 void __restore_processor_state(struct saved_context *ctxt)
@@ -148,57 +147,7 @@ extern int restore_image(void);
 
 pgd_t *temp_level4_pgt;
 
-static void **pages;
-
-static inline void *__add_page(void)
-{
-	void **c;
-
-	c = (void **)get_usable_page(GFP_ATOMIC);
-	if (c) {
-		*c = pages;
-		pages = c;
-	}
-	return c;
-}
-
-static inline void *__next_page(void)
-{
-	void **c;
-
-	c = pages;
-	if (c) {
-		pages = *c;
-		*c = NULL;
-	}
-	return c;
-}
-
-/*
- * Try to allocate as many usable pages as needed and daisy chain them.
- * If one allocation fails, free the pages allocated so far
- */
-static int alloc_usable_pages(unsigned long n)
-{
-	void *p;
-
-	pages = NULL;
-	do
-		if (!__add_page())
-			break;
-	while (--n);
-	if (n) {
-		p = __next_page();
-		while (p) {
-			free_page((unsigned long)p);
-			p = __next_page();
-		}
-		return -ENOMEM;
-	}
-	return 0;
-}
-
-static void res_phys_pud_init(pud_t *pud, unsigned long address, unsigned long end)
+static int res_phys_pud_init(pud_t *pud, unsigned long address, unsigned long end)
 {
 	long i, j;
 
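
The block removed above was a small intrusive free-list: spare pages were daisy-chained by storing the next-pointer in the first word of each page, and alloc_usable_pages() reserved the whole budget up front, unwinding on failure. The patch drops all of it in favour of the swsusp core's get_safe_page(). A minimal userspace sketch of that daisy-chain pattern follows, with malloc()/free() standing in for get_usable_page()/free_page(); every name in it is illustrative only, not kernel API:

    #include <errno.h>
    #include <stdlib.h>

    #define PAGE_SIZE 4096

    static void **pages;	/* head of the intrusive free-list */

    /* Grab one page and push it: the page's first word holds the link. */
    static void *add_page(void)
    {
    	void **c = malloc(PAGE_SIZE);	/* stand-in for get_usable_page() */

    	if (c) {
    		*c = pages;
    		pages = c;
    	}
    	return c;
    }

    /* Pop one page, or NULL once the chain is exhausted. */
    static void *next_page(void)
    {
    	void **c = pages;

    	if (c) {
    		pages = *c;
    		*c = NULL;
    	}
    	return c;
    }

    /* Reserve n pages up front; on partial failure free whatever was
     * already chained, as the removed alloc_usable_pages() did. */
    static int alloc_pages_upfront(unsigned long n)
    {
    	void *p;

    	pages = NULL;
    	do
    		if (!add_page())
    			break;
    	while (--n);
    	if (n) {
    		while ((p = next_page()))
    			free(p);
    		return -ENOMEM;
    	}
    	return 0;
    }

    int main(void)
    {
    	void *p;

    	if (alloc_pages_upfront(8))
    		return 1;
    	while ((p = next_page()))	/* drain the chain, one page at a time */
    		free(p);
    	return 0;
    }

Storing the link inside the page itself means the list costs no extra memory, which is why the old code could afford it this early in resume; the trade-off is that callers must size the reservation correctly in advance, which is exactly what the rest of this patch removes.
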
@@ -212,7 +161,9 @@ static void res_phys_pud_init(pud_t *pud, unsigned long address, unsigned long end)
 		if (paddr >= end)
 			break;
 
-		pmd = (pmd_t *)__next_page();
+		pmd = (pmd_t *)get_safe_page(GFP_ATOMIC);
+		if (!pmd)
+			return -ENOMEM;
 		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
 		for (j = 0; j < PTRS_PER_PMD; pmd++, j++, paddr += PMD_SIZE) {
 			unsigned long pe;
@@ -224,13 +175,17 @@ static void res_phys_pud_init(pud_t *pud, unsigned long address, unsigned long end)
 			set_pmd(pmd, __pmd(pe));
 		}
 	}
+	return 0;
 }
 
-static void set_up_temporary_mappings(void)
+static int set_up_temporary_mappings(void)
 {
 	unsigned long start, end, next;
+	int error;
 
-	temp_level4_pgt = (pgd_t *)__next_page();
+	temp_level4_pgt = (pgd_t *)get_safe_page(GFP_ATOMIC);
+	if (!temp_level4_pgt)
+		return -ENOMEM;
 
 	/* It is safe to reuse the original kernel mapping */
 	set_pgd(temp_level4_pgt + pgd_index(__START_KERNEL_map),
@@ -241,29 +196,27 @@ static void set_up_temporary_mappings(void)
 	end = (unsigned long)pfn_to_kaddr(end_pfn);
 
 	for (; start < end; start = next) {
-		pud_t *pud = (pud_t *)__next_page();
+		pud_t *pud = (pud_t *)get_safe_page(GFP_ATOMIC);
+		if (!pud)
+			return -ENOMEM;
 		next = start + PGDIR_SIZE;
 		if (next > end)
 			next = end;
-		res_phys_pud_init(pud, __pa(start), __pa(next));
+		if ((error = res_phys_pud_init(pud, __pa(start), __pa(next))))
+			return error;
 		set_pgd(temp_level4_pgt + pgd_index(start),
 			mk_kernel_pgd(__pa(pud)));
 	}
+	return 0;
 }
 
 int swsusp_arch_resume(void)
 {
-	unsigned long n;
+	int error;
 
-	n = ((end_pfn << PAGE_SHIFT) + PUD_SIZE - 1) >> PUD_SHIFT;
-	n += (n + PTRS_PER_PUD - 1) / PTRS_PER_PUD + 1;
-	pr_debug("swsusp_arch_resume(): pages needed = %lu\n", n);
-	if (alloc_usable_pages(n)) {
-		free_eaten_memory();
-		return -ENOMEM;
-	}
 	/* We have got enough memory and from now on we cannot recover */
-	set_up_temporary_mappings();
+	if ((error = set_up_temporary_mappings()))
+		return error;
 	restore_image();
 	return 0;
 }
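
Taken together, the hunks replace the up-front page budget in swsusp_arch_resume() (the removed n = ... arithmetic and the alloc_usable_pages() call) with on-demand allocation: each page-table page now comes from the swsusp core's get_safe_page(GFP_ATOMIC) at the point of use, and a NULL result is converted to -ENOMEM and propagated up through set_up_temporary_mappings(). A rough userspace sketch of that error-propagation shape; safe_page(), init_lower_table() and set_up_mappings() are illustrative stand-ins, not kernel APIs:

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Stand-in for get_safe_page(GFP_ATOMIC); returns NULL under pressure. */
    static void *safe_page(void)
    {
    	return calloc(1, 4096);
    }

    /* Analogue of res_phys_pud_init(): allocate a lower-level table on
     * demand and report -ENOMEM instead of dereferencing a NULL pointer. */
    static int init_lower_table(void **slot)
    {
    	void *pmd = safe_page();

    	if (!pmd)
    		return -ENOMEM;
    	*slot = pmd;	/* real code would go on to fill in the entries */
    	return 0;
    }

    /* Analogue of set_up_temporary_mappings(): any failure unwinds upward. */
    static int set_up_mappings(void *slots[], int n)
    {
    	int i, error;

    	for (i = 0; i < n; i++)
    		if ((error = init_lower_table(&slots[i])))
    			return error;	/* same assignment-in-if style as the patch */
    	return 0;
    }

    int main(void)
    {
    	void *slots[4] = { 0 };
    	int error = set_up_mappings(slots, 4);

    	if (error)
    		fprintf(stderr, "setup failed: %d\n", error);
    	for (int i = 0; i < 4; i++)
    		free(slots[i]);
    	return error ? EXIT_FAILURE : EXIT_SUCCESS;
    }

The design win is that there is no estimate to get wrong: the old code had to predict how many pud and pmd pages the temporary mapping would need, while the new code simply asks for each page when it reaches it and can still back out cleanly if the allocation fails.
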