Diffstat (limited to 'arch/x86/power/hibernate_64.c')
-rw-r--r--	arch/x86/power/hibernate_64.c	97
1 file changed, 85 insertions(+), 12 deletions(-)
diff --git a/arch/x86/power/hibernate_64.c b/arch/x86/power/hibernate_64.c
index 009947d419a6..f2b5e6a5cf95 100644
--- a/arch/x86/power/hibernate_64.c
+++ b/arch/x86/power/hibernate_64.c
@@ -19,6 +19,7 @@
 #include <asm/mtrr.h>
 #include <asm/sections.h>
 #include <asm/suspend.h>
+#include <asm/tlbflush.h>
 
 /* Defined in hibernate_asm_64.S */
 extern asmlinkage __visible int restore_image(void);
@@ -28,6 +29,7 @@ extern asmlinkage __visible int restore_image(void);
  * kernel's text (this value is passed in the image header).
  */
 unsigned long restore_jump_address __visible;
+unsigned long jump_address_phys;
 
 /*
  * Value of the cr3 register from before the hibernation (this value is passed
@@ -37,7 +39,43 @@ unsigned long restore_cr3 __visible;
 
 pgd_t *temp_level4_pgt __visible;
 
-void *relocated_restore_code __visible;
+unsigned long relocated_restore_code __visible;
+
+static int set_up_temporary_text_mapping(void)
+{
+	pmd_t *pmd;
+	pud_t *pud;
+
+	/*
+	 * The new mapping only has to cover the page containing the image
+	 * kernel's entry point (jump_address_phys), because the switch over to
+	 * it is carried out by relocated code running from a page allocated
+	 * specifically for this purpose and covered by the identity mapping, so
+	 * the temporary kernel text mapping is only needed for the final jump.
+	 * Moreover, in that mapping the virtual address of the image kernel's
+	 * entry point must be the same as its virtual address in the image
+	 * kernel (restore_jump_address), so the image kernel's
+	 * restore_registers() code doesn't find itself in a different area of
+	 * the virtual address space after switching over to the original page
+	 * tables used by the image kernel.
+	 */
+	pud = (pud_t *)get_safe_page(GFP_ATOMIC);
+	if (!pud)
+		return -ENOMEM;
+
+	pmd = (pmd_t *)get_safe_page(GFP_ATOMIC);
+	if (!pmd)
+		return -ENOMEM;
+
+	set_pmd(pmd + pmd_index(restore_jump_address),
+		__pmd((jump_address_phys & PMD_MASK) | __PAGE_KERNEL_LARGE_EXEC));
+	set_pud(pud + pud_index(restore_jump_address),
+		__pud(__pa(pmd) | _KERNPG_TABLE));
+	set_pgd(temp_level4_pgt + pgd_index(restore_jump_address),
+		__pgd(__pa(pud) | _KERNPG_TABLE));
+
+	return 0;
+}
 
 static void *alloc_pgt_page(void *context)
 {
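The helper above wires up a single 2 MiB executable kernel-text mapping by hand: one pmd entry pointing at jump_address_phys, one pud entry pointing at the pmd page, one pgd entry pointing at the pud page, all indexed by restore_jump_address. As a minimal self-contained sketch of how those table indices fall out of a virtual address under x86-64 4-level paging (512 entries per table, 9 bits per level above the 12-bit page offset; the sample address is hypothetical, not taken from the kernel sources):

#include <stdio.h>

/* Index extraction for x86-64 4-level paging: each table has 512
 * entries, and a single pmd entry with the PSE bit maps 2 MiB.
 */
#define PTRS_PER_TABLE	512UL

static unsigned long pgd_idx(unsigned long va) { return (va >> 39) & (PTRS_PER_TABLE - 1); }
static unsigned long pud_idx(unsigned long va) { return (va >> 30) & (PTRS_PER_TABLE - 1); }
static unsigned long pmd_idx(unsigned long va) { return (va >> 21) & (PTRS_PER_TABLE - 1); }

int main(void)
{
	/* Hypothetical kernel-text address in the __START_KERNEL_map region. */
	unsigned long va = 0xffffffff81000000UL;

	printf("pgd %lu, pud %lu, pmd %lu\n",
	       pgd_idx(va), pud_idx(va), pmd_idx(va));
	return 0;
}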
@@ -59,9 +97,10 @@ static int set_up_temporary_mappings(void)
 	if (!temp_level4_pgt)
 		return -ENOMEM;
 
-	/* It is safe to reuse the original kernel mapping */
-	set_pgd(temp_level4_pgt + pgd_index(__START_KERNEL_map),
-		init_level4_pgt[pgd_index(__START_KERNEL_map)]);
+	/* Prepare a temporary mapping for the kernel text */
+	result = set_up_temporary_text_mapping();
+	if (result)
+		return result;
 
 	/* Set up the direct mapping from scratch */
 	for (i = 0; i < nr_pfn_mapped; i++) {
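This hunk replaces the old shortcut of copying the running kernel's pgd entry for __START_KERNEL_map with a call to the new helper, while the loop that follows rebuilds the direct mapping so that physical address p is reachable at PAGE_OFFSET + p. A sketch of that offset arithmetic, assuming the classic non-randomized 4-level PAGE_OFFSET (the constant and the sample address are illustrative assumptions of this sketch):

#include <stdio.h>

/* The x86-64 direct map places physical address p at virtual address
 * PAGE_OFFSET + p, so __pa()/__va() for that region are plain offset
 * conversions.
 */
#define PAGE_OFFSET	0xffff880000000000UL

static unsigned long virt_to_phys(unsigned long va) { return va - PAGE_OFFSET; }
static unsigned long phys_to_virt(unsigned long pa) { return pa + PAGE_OFFSET; }

int main(void)
{
	unsigned long pa = 0x100000UL;	/* hypothetical physical address */

	printf("pa %#lx <-> va %#lx\n",
	       virt_to_phys(phys_to_virt(pa)), phys_to_virt(pa));
	return 0;
}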
@@ -78,19 +117,50 @@ static int set_up_temporary_mappings(void)
 	return 0;
 }
 
+static int relocate_restore_code(void)
+{
+	pgd_t *pgd;
+	pud_t *pud;
+
+	relocated_restore_code = get_safe_page(GFP_ATOMIC);
+	if (!relocated_restore_code)
+		return -ENOMEM;
+
+	memcpy((void *)relocated_restore_code, &core_restore_code, PAGE_SIZE);
+
+	/* Make the page containing the relocated code executable */
+	pgd = (pgd_t *)__va(read_cr3()) + pgd_index(relocated_restore_code);
+	pud = pud_offset(pgd, relocated_restore_code);
+	if (pud_large(*pud)) {
+		set_pud(pud, __pud(pud_val(*pud) & ~_PAGE_NX));
+	} else {
+		pmd_t *pmd = pmd_offset(pud, relocated_restore_code);
+
+		if (pmd_large(*pmd)) {
+			set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_NX));
+		} else {
+			pte_t *pte = pte_offset_kernel(pmd, relocated_restore_code);
+
+			set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_NX));
+		}
+	}
+	__flush_tlb_all();
+
+	return 0;
+}
+
 int swsusp_arch_resume(void)
 {
 	int error;
 
 	/* We have got enough memory and from now on we cannot recover */
-	if ((error = set_up_temporary_mappings()))
+	error = set_up_temporary_mappings();
+	if (error)
 		return error;
 
-	relocated_restore_code = (void *)get_safe_page(GFP_ATOMIC);
-	if (!relocated_restore_code)
-		return -ENOMEM;
-	memcpy(relocated_restore_code, &core_restore_code,
-	       &restore_registers - &core_restore_code);
+	error = relocate_restore_code();
+	if (error)
+		return error;
 
 	restore_image();
 	return 0;
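relocate_restore_code() copies core_restore_code into a freshly allocated safe page, then walks the current page tables down to whatever level maps that page (1 GiB pud, 2 MiB pmd, or 4 KiB pte) and clears NX there; the __flush_tlb_all() afterwards drops any stale TLB entry that still carries the old no-execute permission. A minimal sketch of the bit manipulation itself (the PTE value is made up for illustration):

#include <stdint.h>
#include <stdio.h>

/* On x86-64 the NX (no-execute) flag is bit 63 of a page-table entry
 * at every level, which is why the same "& ~_PAGE_NX" works for pud,
 * pmd and pte in the hunk above.
 */
#define NX_BIT	(1ULL << 63)

static uint64_t clear_nx(uint64_t entry)
{
	return entry & ~NX_BIT;
}

int main(void)
{
	/* Hypothetical PTE: NX set, plus some low permission/frame bits. */
	uint64_t pte = NX_BIT | 0x12345063ULL;

	printf("%#llx -> %#llx\n",
	       (unsigned long long)pte,
	       (unsigned long long)clear_nx(pte));
	return 0;
}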
@@ -109,11 +179,12 @@ int pfn_is_nosave(unsigned long pfn)
 
 struct restore_data_record {
 	unsigned long jump_address;
+	unsigned long jump_address_phys;
 	unsigned long cr3;
 	unsigned long magic;
 };
 
-#define RESTORE_MAGIC	0x0123456789ABCDEFUL
+#define RESTORE_MAGIC	0x123456789ABCDEF0UL
 
 /**
  * arch_hibernation_header_save - populate the architecture specific part
@@ -126,7 +197,8 @@ int arch_hibernation_header_save(void *addr, unsigned int max_size)
 
 	if (max_size < sizeof(struct restore_data_record))
 		return -EOVERFLOW;
-	rdr->jump_address = restore_jump_address;
+	rdr->jump_address = (unsigned long)&restore_registers;
+	rdr->jump_address_phys = __pa_symbol(&restore_registers);
 	rdr->cr3 = restore_cr3;
 	rdr->magic = RESTORE_MAGIC;
 	return 0;
@@ -142,6 +214,7 @@ int arch_hibernation_header_restore(void *addr)
 	struct restore_data_record *rdr = addr;
 
 	restore_jump_address = rdr->jump_address;
+	jump_address_phys = rdr->jump_address_phys;
 	restore_cr3 = rdr->cr3;
 	return (rdr->magic == RESTORE_MAGIC) ? 0 : -EINVAL;
 }
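The last two hunks form a pair: arch_hibernation_header_save() writes the jump addresses and cr3 into the image header, and arch_hibernation_header_restore() reads them back, using RESTORE_MAGIC to reject images whose header layout predates the new jump_address_phys field, which is why the magic value is bumped in this patch. A self-contained sketch of the same save-then-validate round trip, with made-up field values:

#include <stdio.h>

/* Mirror of the patched struct restore_data_record; every value
 * stored below is hypothetical, for illustration only.
 */
struct restore_data_record {
	unsigned long jump_address;
	unsigned long jump_address_phys;
	unsigned long cr3;
	unsigned long magic;
};

#define RESTORE_MAGIC	0x123456789ABCDEF0UL

static int header_save(struct restore_data_record *rdr, unsigned int max_size)
{
	if (max_size < sizeof(*rdr))
		return -1;				/* stand-in for -EOVERFLOW */
	rdr->jump_address = 0xffffffff81000000UL;	/* hypothetical VA */
	rdr->jump_address_phys = 0x1000000UL;		/* hypothetical PA */
	rdr->cr3 = 0x1fa6000UL;				/* hypothetical cr3 */
	rdr->magic = RESTORE_MAGIC;
	return 0;
}

static int header_restore(const struct restore_data_record *rdr)
{
	/* A stale magic means the header layout changed: reject the image. */
	return (rdr->magic == RESTORE_MAGIC) ? 0 : -1;	/* stand-in for -EINVAL */
}

int main(void)
{
	struct restore_data_record rec;

	if (header_save(&rec, sizeof(rec)) == 0 && header_restore(&rec) == 0)
		puts("header round trip OK");
	return 0;
}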