aboutsummaryrefslogtreecommitdiffstats
path: root/arch/x86/power/hibernate_asm_64.S
diff options
context:
space:
mode:
authorRafael J. Wysocki <rjw@sisk.pl>2008-02-09 17:24:09 -0500
committerThomas Gleixner <tglx@linutronix.de>2008-02-09 17:24:09 -0500
commitcf7700fe24301df2c8d3636cf40784651c098207 (patch)
tree2897be493f1b9af0c3a992c541a22f403c6e405c /arch/x86/power/hibernate_asm_64.S
parent9b706aee7d92d6ac3002547aea12e3eaa0a750ae (diff)
x86 PM: move 64-bit hibernation files to arch/x86/power
Move arch/x86/kernel/suspend_64.c to arch/x86/power . Move arch/x86/kernel/suspend_asm_64.S to arch/x86/power as hibernate_asm_64.S . Update purpose and copyright information in arch/x86/power/suspend_64.c and arch/x86/power/hibernate_asm_64.S . Update the Makefiles in arch/x86, arch/x86/kernel and arch/x86/power to reflect the above changes. Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl> Acked-by: Pavel Machek <pavel@ucw.cz> Signed-off-by: Ingo Molnar <mingo@elte.hu> Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'arch/x86/power/hibernate_asm_64.S')
-rw-r--r--arch/x86/power/hibernate_asm_64.S146
1 files changed, 146 insertions, 0 deletions
diff --git a/arch/x86/power/hibernate_asm_64.S b/arch/x86/power/hibernate_asm_64.S
new file mode 100644
index 000000000000..1deb3244b99b
--- /dev/null
+++ b/arch/x86/power/hibernate_asm_64.S
@@ -0,0 +1,146 @@
1/*
2 * Hibernation support for x86-64
3 *
4 * Distribute under GPLv2.
5 *
6 * Copyright 2007 Rafael J. Wysocki <rjw@sisk.pl>
7 * Copyright 2005 Andi Kleen <ak@suse.de>
8 * Copyright 2004 Pavel Machek <pavel@suse.cz>
9 *
10 * swsusp_arch_resume must not use any stack or any nonlocal variables while
11 * copying pages:
12 *
13 * It's rewriting one kernel image with another. What is stack in the "old"
14 * image could very well be a data page in the "new" image, and overwriting
15 * your own stack under you is a bad idea.
16 */
17
18 .text
19#include <linux/linkage.h>
20#include <asm/segment.h>
21#include <asm/page.h>
22#include <asm/asm-offsets.h>
23
24ENTRY(swsusp_arch_suspend)
	/*
	 * Snapshot the CPU state before creating the hibernation image.
	 * All general-purpose registers and RFLAGS are stored into
	 * saved_context; the pt_regs_* offsets are generated from
	 * struct pt_regs by asm-offsets.  restore_registers reloads this
	 * exact state on resume.
	 */
25 movq $saved_context, %rax
26 movq %rsp, pt_regs_sp(%rax)
27 movq %rbp, pt_regs_bp(%rax)
28 movq %rsi, pt_regs_si(%rax)
29 movq %rdi, pt_regs_di(%rax)
30 movq %rbx, pt_regs_bx(%rax)
31 movq %rcx, pt_regs_cx(%rax)
32 movq %rdx, pt_regs_dx(%rax)
33 movq %r8, pt_regs_r8(%rax)
34 movq %r9, pt_regs_r9(%rax)
35 movq %r10, pt_regs_r10(%rax)
36 movq %r11, pt_regs_r11(%rax)
37 movq %r12, pt_regs_r12(%rax)
38 movq %r13, pt_regs_r13(%rax)
39 movq %r14, pt_regs_r14(%rax)
40 movq %r15, pt_regs_r15(%rax)
	/* RFLAGS can only be read via the stack: push it, pop into memory */
41 pushfq
42 popq pt_regs_flags(%rax)
43
44 /* save the address of restore_registers */
45 movq $restore_registers, %rax
46 movq %rax, restore_jump_address(%rip)
47 /* save cr3 */
48 movq %cr3, %rax
49 movq %rax, restore_cr3(%rip)
50
	/* hand over to the C core to build the image */
51 call swsusp_save
52 ret
53
54ENTRY(restore_image)
	/*
	 * Runs in the boot kernel.  Switch to the temporary page tables
	 * prepared by the hibernation core, flush the TLB, then jump to
	 * the relocated copy of core_restore_code with:
	 *   %rax = image kernel's restore_registers address
	 *   %rbx = image kernel's saved %cr3
	 *   %rdx = head of the restore_pblist page list
	 */
55 /* switch to temporary page tables */
	/* temp_level4_pgt holds a kernel virtual address; subtract
	 * __PAGE_OFFSET to get the physical address %cr3 requires */
56 movq $__PAGE_OFFSET, %rdx
57 movq temp_level4_pgt(%rip), %rax
58 subq %rdx, %rax
59 movq %rax, %cr3
60 /* Flush TLB */
	/* toggling CR4.PGE off and rewriting %cr3 also drops "global"
	 * TLB entries that a plain %cr3 reload would keep */
61 movq mmu_cr4_features(%rip), %rax
62 movq %rax, %rdx
63 andq $~(1<<7), %rdx # PGE
64 movq %rdx, %cr4; # turn off PGE
65 movq %cr3, %rcx; # flush TLB
66 movq %rcx, %cr3;
67 movq %rax, %cr4; # turn PGE back on
68
69 /* prepare to jump to the image kernel */
70 movq restore_jump_address(%rip), %rax
71 movq restore_cr3(%rip), %rbx
72
73 /* prepare to copy image data to their original locations */
74 movq restore_pblist(%rip), %rdx
75 movq relocated_restore_code(%rip), %rcx
76 jmpq *%rcx
77
78 /* code below has been relocated to a safe page */
	/*
	 * Page-copy loop.  It executes from a relocated "safe" page so it
	 * cannot overwrite itself while restoring the image, and — per the
	 * file header — uses no stack and no nonlocal variables.
	 *   %rdx = current pbe (NULL terminates the list)
	 *   %rax, %rbx pass through untouched for restore_registers
	 */
79ENTRY(core_restore_code)
80loop:
81 testq %rdx, %rdx
82 jz done
83
84 /* get addresses from the pbe and copy the page */
85 movq pbe_address(%rdx), %rsi
86 movq pbe_orig_address(%rdx), %rdi
	/* PAGE_SIZE >> 3 = page size in quadwords for rep movsq */
87 movq $(PAGE_SIZE >> 3), %rcx
88 rep
89 movsq
90
91 /* progress to the next pbe */
92 movq pbe_next(%rdx), %rdx
93 jmp loop
94done:
95 /* jump to the restore_registers address from the image header */
96 jmpq *%rax
97 /*
98 * NOTE: This assumes that the boot kernel's text mapping covers the
99 * image kernel's page containing restore_registers and the address of
100 * this page is the same as in the image kernel's text mapping (it
101 * should always be true, because the text mapping is linear, starting
102 * from 0, and is supposed to cover the entire kernel text for every
103 * kernel).
104 *
105 * code below belongs to the image kernel
106 */
107
108ENTRY(restore_registers)
	/*
	 * Final resume stage, reached via the jump in core_restore_code.
	 * Memory now holds the image kernel, so switch back to its page
	 * tables (%rbx, loaded in restore_image) and reload the register
	 * state that swsusp_arch_suspend stored in saved_context.
	 */
109 /* go back to the original page tables */
110 movq %rbx, %cr3
111
112 /* Flush TLB, including "global" things (vmalloc) */
113 movq mmu_cr4_features(%rip), %rax
114 movq %rax, %rdx
115 andq $~(1<<7), %rdx; # PGE
116 movq %rdx, %cr4; # turn off PGE
117 movq %cr3, %rcx; # flush TLB
118 movq %rcx, %cr3
119 movq %rax, %cr4; # turn PGE back on
120
121 /* We don't restore %rax, it must be 0 anyway */
122 movq $saved_context, %rax
123 movq pt_regs_sp(%rax), %rsp
124 movq pt_regs_bp(%rax), %rbp
125 movq pt_regs_si(%rax), %rsi
126 movq pt_regs_di(%rax), %rdi
127 movq pt_regs_bx(%rax), %rbx
128 movq pt_regs_cx(%rax), %rcx
129 movq pt_regs_dx(%rax), %rdx
130 movq pt_regs_r8(%rax), %r8
131 movq pt_regs_r9(%rax), %r9
132 movq pt_regs_r10(%rax), %r10
133 movq pt_regs_r11(%rax), %r11
134 movq pt_regs_r12(%rax), %r12
135 movq pt_regs_r13(%rax), %r13
136 movq pt_regs_r14(%rax), %r14
137 movq pt_regs_r15(%rax), %r15
	/* RFLAGS comes back the same way it was saved: via the stack */
138 pushq pt_regs_flags(%rax)
139 popfq
140
141 xorq %rax, %rax
142
143 /* tell the hibernation core that we've just restored the memory */
144 movq %rax, in_suspend(%rip)
145
	/* %rsp was restored above, so this returns into the image
	 * kernel's caller of swsusp_arch_suspend */
146 ret