Diffstat (limited to 'arch/x86/kernel/suspend_asm_64.S')
-rw-r--r--  arch/x86/kernel/suspend_asm_64.S | 41 ++++++++++++++++++++++++++++++++++-------
 1 file changed, 34 insertions(+), 7 deletions(-)

diff --git a/arch/x86/kernel/suspend_asm_64.S b/arch/x86/kernel/suspend_asm_64.S
index 16d183f67bc1..40a209e0525c 100644
--- a/arch/x86/kernel/suspend_asm_64.S
+++ b/arch/x86/kernel/suspend_asm_64.S
@@ -2,8 +2,8 @@
  *
  * Distribute under GPLv2.
  *
- * swsusp_arch_resume may not use any stack, nor any variable that is
- * not "NoSave" during copying pages:
+ * swsusp_arch_resume must not use any stack or any nonlocal variables while
+ * copying pages:
  *
  * Its rewriting one kernel image with another. What is stack in "old"
  * image could very well be data page in "new" image, and overwriting
@@ -36,6 +36,10 @@ ENTRY(swsusp_arch_suspend)
 	movq	%r15, saved_context_r15(%rip)
 	pushfq ; popq saved_context_eflags(%rip)
 
+	/* save the address of restore_registers */
+	movq	$restore_registers, %rax
+	movq	%rax, restore_jump_address(%rip)
+
 	call	swsusp_save
 	ret
 
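restore_jump_address is how the image kernel tells the boot kernel where to jump once all pages have been copied back. A minimal C sketch of the idea (restore_jump_address is the real variable used above; the header structure and function names here are illustrative, since the actual header handling lives outside this file):

	/* sketch only: struct and function names are hypothetical */
	extern unsigned long restore_jump_address;

	struct hibernation_header {
		unsigned long jump_address;	/* where the boot kernel should jump */
	};

	static void fill_header(struct hibernation_header *header)
	{
		/* swsusp_arch_suspend stored &restore_registers in this variable */
		header->jump_address = restore_jump_address;
	}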
@@ -54,7 +58,16 @@ ENTRY(restore_image)
 	movq	%rcx, %cr3;
 	movq	%rax, %cr4;  # turn PGE back on
 
+	/* prepare to jump to the image kernel */
+	movq	restore_jump_address(%rip), %rax
+
+	/* prepare to copy image data to their original locations */
 	movq	restore_pblist(%rip), %rdx
+	movq	relocated_restore_code(%rip), %rcx
+	jmpq	*%rcx
+
+	/* code below has been relocated to a safe page */
+ENTRY(core_restore_code)
 loop:
 	testq	%rdx, %rdx
 	jz	done
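For reference, restore_pblist is the head of a singly linked list of page backup entries (struct pbe from <linux/suspend.h>), each pairing the copy of a page with its original location. The assembly loop above is roughly the following C (a sketch only; the real code must avoid the stack and run from a safe page, which is exactly why it is written in assembly):

	#include <linux/string.h>	/* memcpy */
	#include <linux/suspend.h>	/* struct pbe */
	#include <asm/page.h>		/* PAGE_SIZE */

	/* sketch only: a C rendering of the core_restore_code loop */
	static void copy_back_pages(struct pbe *restore_pblist)
	{
		struct pbe *pbe;

		for (pbe = restore_pblist; pbe; pbe = pbe->next)
			memcpy(pbe->orig_address, pbe->address, PAGE_SIZE);
	}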
@@ -62,7 +75,7 @@ loop:
 	/* get addresses from the pbe and copy the page */
 	movq	pbe_address(%rdx), %rsi
 	movq	pbe_orig_address(%rdx), %rdi
-	movq	$512, %rcx
+	movq	$(PAGE_SIZE >> 3), %rcx
 	rep
 	movsq
 
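Replacing the hard-coded 512 with $(PAGE_SIZE >> 3) makes the quadword count follow the page size: movsq copies 8 bytes per iteration, and 4096 >> 3 = 512, so behavior is unchanged on 4 KiB pages while the intent becomes explicit. A compile-time check of that equivalence might look like this (a sketch, assuming the kernel's BUILD_BUG_ON helper):

	#include <linux/bug.h>		/* BUILD_BUG_ON */
	#include <asm/page.h>		/* PAGE_SIZE */

	static inline void check_page_copy_count(void)
	{
		/* (PAGE_SIZE >> 3) quadwords of 8 bytes each must cover one page */
		BUILD_BUG_ON(((PAGE_SIZE >> 3) * 8) != PAGE_SIZE);
	}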
@@ -70,6 +83,20 @@ loop:
 	movq	pbe_next(%rdx), %rdx
 	jmp	loop
 done:
+	/* jump to the restore_registers address from the image header */
+	jmpq	*%rax
+	/*
+	 * NOTE: This assumes that the boot kernel's text mapping covers the
+	 * image kernel's page containing restore_registers and the address of
+	 * this page is the same as in the image kernel's text mapping (it
+	 * should always be true, because the text mapping is linear, starting
+	 * from 0, and is supposed to cover the entire kernel text for every
+	 * kernel).
+	 *
+	 * code below belongs to the image kernel
+	 */
+
+ENTRY(restore_registers)
 	/* go back to the original page tables */
 	movq	$(init_level4_pgt - __START_KERNEL_map), %rax
 	addq	phys_base(%rip), %rax
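Once in restore_registers, the code switches back to the image kernel's own page tables; the movq/addq pair turns the kernel-virtual address of init_level4_pgt into the physical address that %cr3 requires. In C terms the arithmetic is simply the following (a sketch; __START_KERNEL_map and phys_base are the real symbols used above, the function name is illustrative):

	#include <asm/page.h>		/* __START_KERNEL_map */

	extern unsigned long phys_base;	/* physical load offset of the kernel */

	/* sketch only: the address math performed by the movq/addq pair above */
	static unsigned long pgd_phys(unsigned long virt)
	{
		return virt - __START_KERNEL_map + phys_base;
	}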
@@ -84,12 +111,9 @@ done:
 	movq	%rcx, %cr3
 	movq	%rax, %cr4;  # turn PGE back on
 
-	movl	$24, %eax
-	movl	%eax, %ds
-
 	movq	saved_context_esp(%rip), %rsp
 	movq	saved_context_ebp(%rip), %rbp
-	/* Don't restore %rax, it must be 0 anyway */
+	/* restore GPRs (we don't restore %rax, it must be 0 anyway) */
 	movq	saved_context_ebx(%rip), %rbx
 	movq	saved_context_ecx(%rip), %rcx
 	movq	saved_context_edx(%rip), %rdx
@@ -107,4 +131,7 @@ done:
 
 	xorq	%rax, %rax
 
+	/* tell the hibernation core that we've just restored the memory */
+	movq	%rax, in_suspend(%rip)
+
 	ret
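Clearing in_suspend here is what lets the generic hibernation code distinguish the two ways control can return from swsusp_arch_suspend(): directly after snapshotting, or via the restored image. A sketch of the caller-side pattern (in_suspend and swsusp_arch_suspend are the real symbols from kernel/power; the function name and error handling here are illustrative):

	extern int in_suspend;			/* kernel/power/power.h */
	extern int swsusp_arch_suspend(void);

	/* sketch only: how a caller can tell the two return paths apart */
	static int snapshot_or_resume(void)
	{
		int error;

		in_suspend = 1;
		error = swsusp_arch_suspend();	/* may "return" twice */
		if (error)
			return error;

		/*
		 * in_suspend == 1: we came straight back after saving the
		 * snapshot; in_suspend == 0: restore_registers cleared it,
		 * so we are now running as the restored image kernel.
		 */
		return in_suspend;
	}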