author     Tony Luck <tony.luck@intel.com>   2005-10-31 13:51:57 -0500
committer  Tony Luck <tony.luck@intel.com>   2005-10-31 13:51:57 -0500
commit     c7fb577e2a6cb04732541f2dc402bd46747f7558 (patch)
tree       df3b1a1922ed13bfbcc45d08650c38beeb1a7bd1 /arch/x86_64/kernel/suspend.c
parent     9cec58dc138d6fcad9f447a19c8ff69f6540e667 (diff)
parent     581c1b14394aee60aff46ea67d05483261ed6527 (diff)
manual update from upstream:
Applied Al's change 06a544971fad0992fe8b92c5647538d573089dd4
to the new location of swiotlb.c
Signed-off-by: Tony Luck <tony.luck@intel.com>
Diffstat (limited to 'arch/x86_64/kernel/suspend.c')
-rw-r--r--   arch/x86_64/kernel/suspend.c   95
1 file changed, 24 insertions, 71 deletions
diff --git a/arch/x86_64/kernel/suspend.c b/arch/x86_64/kernel/suspend.c
index f066c6ab3618..fd2bef780882 100644
--- a/arch/x86_64/kernel/suspend.c
+++ b/arch/x86_64/kernel/suspend.c
@@ -63,13 +63,12 @@ void save_processor_state(void)
 	__save_processor_state(&saved_context);
 }
 
-static void
-do_fpu_end(void)
+static void do_fpu_end(void)
 {
-	/* restore FPU regs if necessary */
-	/* Do it out of line so that gcc does not move cr0 load to some stupid place */
+	/*
+	 * Restore FPU regs if necessary
+	 */
 	kernel_fpu_end();
-	mxcsr_feature_mask_init();
 }
 
 void __restore_processor_state(struct saved_context *ctxt)
@@ -148,57 +147,7 @@ extern int restore_image(void);
 
 pgd_t *temp_level4_pgt;
 
-static void **pages;
-
-static inline void *__add_page(void)
-{
-	void **c;
-
-	c = (void **)get_usable_page(GFP_ATOMIC);
-	if (c) {
-		*c = pages;
-		pages = c;
-	}
-	return c;
-}
-
-static inline void *__next_page(void)
-{
-	void **c;
-
-	c = pages;
-	if (c) {
-		pages = *c;
-		*c = NULL;
-	}
-	return c;
-}
-
-/*
- * Try to allocate as many usable pages as needed and daisy chain them.
- * If one allocation fails, free the pages allocated so far
- */
-static int alloc_usable_pages(unsigned long n)
-{
-	void *p;
-
-	pages = NULL;
-	do
-		if (!__add_page())
-			break;
-	while (--n);
-	if (n) {
-		p = __next_page();
-		while (p) {
-			free_page((unsigned long)p);
-			p = __next_page();
-		}
-		return -ENOMEM;
-	}
-	return 0;
-}
-
-static void res_phys_pud_init(pud_t *pud, unsigned long address, unsigned long end)
+static int res_phys_pud_init(pud_t *pud, unsigned long address, unsigned long end)
 {
 	long i, j;
 
@@ -212,7 +161,9 @@ static void res_phys_pud_init(pud_t *pud, unsigned long address, unsigned long e
 		if (paddr >= end)
 			break;
 
-		pmd = (pmd_t *)__next_page();
+		pmd = (pmd_t *)get_safe_page(GFP_ATOMIC);
+		if (!pmd)
+			return -ENOMEM;
 		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
 		for (j = 0; j < PTRS_PER_PMD; pmd++, j++, paddr += PMD_SIZE) {
 			unsigned long pe;
@@ -224,13 +175,17 @@ static void res_phys_pud_init(pud_t *pud, unsigned long address, unsigned long e
 			set_pmd(pmd, __pmd(pe));
 		}
 	}
+	return 0;
 }
 
-static void set_up_temporary_mappings(void)
+static int set_up_temporary_mappings(void)
 {
 	unsigned long start, end, next;
+	int error;
 
-	temp_level4_pgt = (pgd_t *)__next_page();
+	temp_level4_pgt = (pgd_t *)get_safe_page(GFP_ATOMIC);
+	if (!temp_level4_pgt)
+		return -ENOMEM;
 
 	/* It is safe to reuse the original kernel mapping */
 	set_pgd(temp_level4_pgt + pgd_index(__START_KERNEL_map),
@@ -241,29 +196,27 @@ static void set_up_temporary_mappings(void)
 	end = (unsigned long)pfn_to_kaddr(end_pfn);
 
 	for (; start < end; start = next) {
-		pud_t *pud = (pud_t *)__next_page();
+		pud_t *pud = (pud_t *)get_safe_page(GFP_ATOMIC);
+		if (!pud)
+			return -ENOMEM;
 		next = start + PGDIR_SIZE;
 		if (next > end)
 			next = end;
-		res_phys_pud_init(pud, __pa(start), __pa(next));
+		if ((error = res_phys_pud_init(pud, __pa(start), __pa(next))))
+			return error;
 		set_pgd(temp_level4_pgt + pgd_index(start),
 			mk_kernel_pgd(__pa(pud)));
 	}
+	return 0;
 }
 
 int swsusp_arch_resume(void)
 {
-	unsigned long n;
+	int error;
 
-	n = ((end_pfn << PAGE_SHIFT) + PUD_SIZE - 1) >> PUD_SHIFT;
-	n += (n + PTRS_PER_PUD - 1) / PTRS_PER_PUD + 1;
-	pr_debug("swsusp_arch_resume(): pages needed = %lu\n", n);
-	if (alloc_usable_pages(n)) {
-		free_eaten_memory();
-		return -ENOMEM;
-	}
 	/* We have got enough memory and from now on we cannot recover */
-	set_up_temporary_mappings();
+	if ((error = set_up_temporary_mappings()))
+		return error;
 	restore_image();
 	return 0;
 }
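For readers skimming the change: the old code estimated how many pages the resume pagetables would need, pre-allocated them with alloc_usable_pages() (daisy-chaining each free page through its own first word), and had to unwind with free_eaten_memory() on failure. The new code instead calls get_safe_page(GFP_ATOMIC) at the point each pud/pmd/pgd page is needed and lets -ENOMEM bubble up through res_phys_pud_init() and set_up_temporary_mappings() to swsusp_arch_resume(). The sketch below is a minimal userspace model of that on-demand pattern, not kernel code: malloc()/calloc() stands in for get_safe_page(), and build_level() is a hypothetical stand-in for the pagetable helpers.

/*
 * Userspace model of the allocation pattern this patch adopts:
 * allocate each table page on demand and propagate -ENOMEM,
 * instead of pre-allocating a daisy-chained pool up front.
 * calloc() stands in for get_safe_page(GFP_ATOMIC); the names
 * here are illustrative, not the kernel API.
 */
#include <stdio.h>
#include <stdlib.h>

#define ENOMEM 12	/* same numeric value as the kernel's errno */

/* Stand-in for get_safe_page(GFP_ATOMIC): may return NULL. */
static void *get_page_or_null(void)
{
	return calloc(1, 4096);
}

/* Hypothetical stand-in for res_phys_pud_init() and friends. */
static int build_level(void **slot)
{
	void *page = get_page_or_null();

	if (!page)
		return -ENOMEM;	/* caller just passes the error up */
	*slot = page;
	return 0;
}

int main(void)
{
	void *table[4] = { NULL };
	int error;

	for (int i = 0; i < 4; i++) {
		/* mirrors: if ((error = res_phys_pud_init(...))) return error; */
		if ((error = build_level(&table[i]))) {
			fprintf(stderr, "setup failed: %d\n", error);
			return 1;
		}
	}
	printf("all levels allocated on demand\n");

	for (int i = 0; i < 4; i++)
		free(table[i]);
	return 0;
}

The design win is that the up-front page-count arithmetic (and its off-by-one risk) disappears: each allocation site checks its own failure, and the caller's only job is to return the error it received.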