Diffstat (limited to 'arch/arm64/kernel/hibernate.c')
 -rw-r--r--  arch/arm64/kernel/hibernate.c | 82
 1 file changed, 49 insertions(+), 33 deletions(-)
diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c
index 21ab5df9fa76..65d81f965e74 100644
--- a/arch/arm64/kernel/hibernate.c
+++ b/arch/arm64/kernel/hibernate.c
@@ -35,6 +35,7 @@
 #include <asm/sections.h>
 #include <asm/smp.h>
 #include <asm/suspend.h>
+#include <asm/sysreg.h>
 #include <asm/virt.h>
 
 /*
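<asm/sysreg.h> is pulled in for the write_sysreg() accessor that replaces open-coded inline assembly later in this diff. As a minimal sketch of the accessor pattern (illustration only; the helper my_read_ttbr0() is invented here and is not part of this patch):

	#include <linux/types.h>
	#include <asm/sysreg.h>		/* read_sysreg()/write_sysreg() */

	/* Illustration only: the accessors expand to single mrs/msr instructions. */
	static u64 my_read_ttbr0(void)
	{
		return read_sysreg(ttbr0_el1);
	}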
@@ -217,12 +218,22 @@ static int create_safe_exec_page(void *src_start, size_t length,
 	set_pte(pte, __pte(virt_to_phys((void *)dst) |
 			   pgprot_val(PAGE_KERNEL_EXEC)));
 
-	/* Load our new page tables */
-	asm volatile("msr	ttbr0_el1, %0;"
-		     "isb;"
-		     "tlbi	vmalle1is;"
-		     "dsb	ish;"
-		     "isb" : : "r"(virt_to_phys(pgd)));
+	/*
+	 * Load our new page tables. A strict BBM approach requires that we
+	 * ensure that TLBs are free of any entries that may overlap with the
+	 * global mappings we are about to install.
+	 *
+	 * For a real hibernate/resume cycle TTBR0 currently points to a zero
+	 * page, but TLBs may contain stale ASID-tagged entries (e.g. for EFI
+	 * runtime services), while for a userspace-driven test_resume cycle it
+	 * points to userspace page tables (and we must point it at a zero page
+	 * ourselves). Elsewhere we only (un)install the idmap with preemption
+	 * disabled, so T0SZ should be as required regardless.
+	 */
+	cpu_set_reserved_ttbr0();
+	local_flush_tlb_all();
+	write_sysreg(virt_to_phys(pgd), ttbr0_el1);
+	isb();
 
 	*phys_dst_addr = virt_to_phys((void *)dst);
 
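The ordering is the substance of this hunk: the removed assembly installed the new tables and only then issued the tlbi, leaving a window in which stale TLB entries could conflict with the incoming global mappings. The replacement breaks before it makes. A condensed sketch of the same sequence, assuming it runs with preemption disabled as the comment above requires; my_bbm_install_ttbr0() is an illustrative name, and the four calls are exactly the ones added above:

	#include <asm/barrier.h>	/* isb() */
	#include <asm/memory.h>		/* virt_to_phys() */
	#include <asm/mmu_context.h>	/* cpu_set_reserved_ttbr0() */
	#include <asm/sysreg.h>		/* write_sysreg() */
	#include <asm/tlbflush.h>	/* local_flush_tlb_all() */

	static void my_bbm_install_ttbr0(pgd_t *pgd)
	{
		cpu_set_reserved_ttbr0();	/* "break": TTBR0 -> empty zero page */
		local_flush_tlb_all();		/* flush while nothing new can be cached */
		write_sysreg(virt_to_phys(pgd), ttbr0_el1);	/* "make": install new tables */
		isb();				/* synchronize the context change */
	}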
@@ -394,6 +405,38 @@ int swsusp_arch_resume(void)
 					  void *, phys_addr_t, phys_addr_t);
 
 	/*
+	 * Restoring the memory image will overwrite the ttbr1 page tables.
+	 * Create a second copy of just the linear map, and use this when
+	 * restoring.
+	 */
+	tmp_pg_dir = (pgd_t *)get_safe_page(GFP_ATOMIC);
+	if (!tmp_pg_dir) {
+		pr_err("Failed to allocate memory for temporary page tables.");
+		rc = -ENOMEM;
+		goto out;
+	}
+	rc = copy_page_tables(tmp_pg_dir, PAGE_OFFSET, 0);
+	if (rc)
+		goto out;
+
+	/*
+	 * Since we only copied the linear map, we need to find restore_pblist's
+	 * linear map address.
+	 */
+	lm_restore_pblist = LMADDR(restore_pblist);
+
+	/*
+	 * We need a zero page that is zero before & after resume in order to
+	 * break before make on the ttbr1 page tables.
+	 */
+	zero_page = (void *)get_safe_page(GFP_ATOMIC);
+	if (!zero_page) {
+		pr_err("Failed to allocate zero page.");
+		rc = -ENOMEM;
+		goto out;
+	}
+
+	/*
 	 * Locate the exit code in the bottom-but-one page, so that *NULL
 	 * still has disastrous effects.
 	 */
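Besides moving the two allocations ahead of the code that consumes them, this hunk makes both get_safe_page() calls report failure instead of pressing on with a NULL pointer (the old zero_page allocation, removed below, was never checked). A minimal sketch of the checked-allocation pattern this hunk standardizes on; my_alloc_checked() is invented for illustration:

	#include <linux/errno.h>	/* ENOMEM */
	#include <linux/gfp.h>		/* GFP_ATOMIC */
	#include <linux/printk.h>	/* pr_err() */
	#include <linux/suspend.h>	/* get_safe_page() */

	/* Illustration only: a checked safe-page allocation that reports failure. */
	static int my_alloc_checked(void **out)
	{
		/* get_safe_page() hands back memory the restore path won't overwrite. */
		*out = (void *)get_safe_page(GFP_ATOMIC);
		if (!*out) {
			pr_err("Failed to allocate a safe page.");
			return -ENOMEM;
		}
		return 0;
	}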
@@ -419,27 +462,6 @@ int swsusp_arch_resume(void)
 	__flush_dcache_area(hibernate_exit, exit_size);
 
 	/*
-	 * Restoring the memory image will overwrite the ttbr1 page tables.
-	 * Create a second copy of just the linear map, and use this when
-	 * restoring.
-	 */
-	tmp_pg_dir = (pgd_t *)get_safe_page(GFP_ATOMIC);
-	if (!tmp_pg_dir) {
-		pr_err("Failed to allocate memory for temporary page tables.");
-		rc = -ENOMEM;
-		goto out;
-	}
-	rc = copy_page_tables(tmp_pg_dir, PAGE_OFFSET, 0);
-	if (rc)
-		goto out;
-
-	/*
-	 * Since we only copied the linear map, we need to find restore_pblist's
-	 * linear map address.
-	 */
-	lm_restore_pblist = LMADDR(restore_pblist);
-
-	/*
 	 * KASLR will cause the el2 vectors to be in a different location in
 	 * the resumed kernel. Load hibernate's temporary copy into el2.
 	 *
@@ -453,12 +475,6 @@ int swsusp_arch_resume(void)
 		__hyp_set_vectors(el2_vectors);
 	}
 
-	/*
-	 * We need a zero page that is zero before & after resume in order to
-	 * break before make on the ttbr1 page tables.
-	 */
-	zero_page = (void *)get_safe_page(GFP_ATOMIC);
-
 	hibernate_exit(virt_to_phys(tmp_pg_dir), resume_hdr.ttbr1_el1,
 		       resume_hdr.reenter_kernel, lm_restore_pblist,
 		       resume_hdr.__hyp_stub_vectors, virt_to_phys(zero_page));
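For readability, the final call again with each argument's role spelled out; the annotations are inferred from the comments in this diff, not from the (off-screen) definition of hibernate_exit:

	hibernate_exit(virt_to_phys(tmp_pg_dir),	/* throw-away ttbr1 tables covering the linear map */
		       resume_hdr.ttbr1_el1,		/* the resumed kernel's real page tables */
		       resume_hdr.reenter_kernel,	/* where execution continues after restore */
		       lm_restore_pblist,		/* restore list, via its linear-map alias */
		       resume_hdr.__hyp_stub_vectors,	/* el2 vectors for the resumed kernel */
		       virt_to_phys(zero_page));	/* zero page for break-before-make on ttbr1 */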