-rw-r--r--  arch/x86/kernel/machine_kexec_32.c   | 17 ++++++++++-------
-rw-r--r--  arch/x86/kernel/machine_kexec_64.c   | 15 ++++++++-------
-rw-r--r--  arch/x86/kernel/relocate_kernel_32.S | 24 ++++++++++++++++--------
-rw-r--r--  arch/x86/kernel/relocate_kernel_64.S | 24 ++++++++++++++++--------
4 files changed, 50 insertions, 30 deletions
diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
index f5fc8c781a62..e7368c1da01d 100644
--- a/arch/x86/kernel/machine_kexec_32.c
+++ b/arch/x86/kernel/machine_kexec_32.c
@@ -14,12 +14,12 @@
 #include <linux/ftrace.h>
 #include <linux/suspend.h>
 #include <linux/gfp.h>
+#include <linux/io.h>
 
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
 #include <asm/tlbflush.h>
 #include <asm/mmu_context.h>
-#include <asm/io.h>
 #include <asm/apic.h>
 #include <asm/cpufeature.h>
 #include <asm/desc.h>
@@ -63,7 +63,7 @@ static void load_segments(void)
 		"\tmovl %%eax,%%fs\n"
 		"\tmovl %%eax,%%gs\n"
 		"\tmovl %%eax,%%ss\n"
-		::: "eax", "memory");
+		: : : "eax", "memory");
 #undef STR
 #undef __STR
 }
@@ -205,7 +205,8 @@ void machine_kexec(struct kimage *image)
 
 	if (image->preserve_context) {
 #ifdef CONFIG_X86_IO_APIC
-		/* We need to put APICs in legacy mode so that we can
+		/*
+		 * We need to put APICs in legacy mode so that we can
 		 * get timer interrupts in second kernel. kexec/kdump
 		 * paths already have calls to disable_IO_APIC() in
 		 * one form or other. kexec jump path also need
@@ -227,7 +228,8 @@ void machine_kexec(struct kimage *image)
 		page_list[PA_SWAP_PAGE] = (page_to_pfn(image->swap_page)
 						<< PAGE_SHIFT);
 
-	/* The segment registers are funny things, they have both a
+	/*
+	 * The segment registers are funny things, they have both a
 	 * visible and an invisible part. Whenever the visible part is
 	 * set to a specific selector, the invisible part is loaded
 	 * with from a table in memory. At no other time is the
@@ -237,11 +239,12 @@ void machine_kexec(struct kimage *image)
 	 * segments, before I zap the gdt with an invalid value.
 	 */
 	load_segments();
-	/* The gdt & idt are now invalid.
+	/*
+	 * The gdt & idt are now invalid.
 	 * If you want to load them you must set up your own idt & gdt.
 	 */
-	set_gdt(phys_to_virt(0),0);
-	set_idt(phys_to_virt(0),0);
+	set_gdt(phys_to_virt(0), 0);
+	set_idt(phys_to_virt(0), 0);
 
 	/* now call it */
 	image->start = relocate_kernel_ptr((unsigned long)image->head,
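
Note: the machine_kexec_32.c hunks above are style-only: <asm/io.h> moves to <linux/io.h>, the empty inline-asm constraint list gains spaces, multi-line comments open the comment marker on a line of its own, and set_gdt()/set_idt() gain a space after the comma. A minimal stand-alone C sketch of those conventions follows; set_gdt_stub() and the printf() call are hypothetical illustrations, not code from the patch.

#include <stdio.h>

/*
 * Kernel block comments open the comment marker on a line of its own;
 * every continuation line starts with an aligned asterisk.
 */
static void set_gdt_stub(void *addr, unsigned int limit)
{
	/* a space follows each comma in an argument list */
	printf("gdt base=%p limit=%u\n", addr, limit);
}

int main(void)
{
	set_gdt_stub(NULL, 0);

	/* empty inline-asm constraint sections are spelled ": : :", not ":::" */
	asm volatile("" : : : "memory");
	return 0;
}
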
diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c
index 6993d51b7fd8..f8c796fffa0f 100644
--- a/arch/x86/kernel/machine_kexec_64.c
+++ b/arch/x86/kernel/machine_kexec_64.c
@@ -12,11 +12,11 @@
 #include <linux/reboot.h>
 #include <linux/numa.h>
 #include <linux/ftrace.h>
+#include <linux/io.h>
 
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
 #include <asm/mmu_context.h>
-#include <asm/io.h>
 
 static void init_level2_page(pmd_t *level2p, unsigned long addr)
 {
@@ -83,9 +83,8 @@ static int init_level4_page(struct kimage *image, pgd_t *level4p,
 		}
 		level3p = (pud_t *)page_address(page);
 		result = init_level3_page(image, level3p, addr, last_addr);
-		if (result) {
+		if (result)
 			goto out;
-		}
 		set_pgd(level4p++, __pgd(__pa(level3p) | _KERNPG_TABLE));
 		addr += PGDIR_SIZE;
 	}
@@ -242,7 +241,8 @@ void machine_kexec(struct kimage *image)
 	page_list[PA_TABLE_PAGE] =
 	  (unsigned long)__pa(page_address(image->control_code_page));
 
-	/* The segment registers are funny things, they have both a
+	/*
+	 * The segment registers are funny things, they have both a
 	 * visible and an invisible part. Whenever the visible part is
 	 * set to a specific selector, the invisible part is loaded
 	 * with from a table in memory. At no other time is the
@@ -252,11 +252,12 @@ void machine_kexec(struct kimage *image)
 	 * segments, before I zap the gdt with an invalid value.
 	 */
 	load_segments();
-	/* The gdt & idt are now invalid.
+	/*
+	 * The gdt & idt are now invalid.
 	 * If you want to load them you must set up your own idt & gdt.
 	 */
-	set_gdt(phys_to_virt(0),0);
-	set_idt(phys_to_virt(0),0);
+	set_gdt(phys_to_virt(0), 0);
+	set_idt(phys_to_virt(0), 0);
 
 	/* now call it */
 	relocate_kernel((unsigned long)image->head, (unsigned long)page_list,
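
Note: the init_level4_page() hunk above also applies the rule that a single-statement if body loses its braces. A small stand-alone sketch of that rule, assuming a made-up init_one_level() helper rather than the kernel's init_level3_page():

#include <stdio.h>

static int init_one_level(void)
{
	return 0;	/* pretend the page-table setup step succeeded */
}

int main(void)
{
	int result;

	result = init_one_level();
	if (result)		/* single-statement body: no braces */
		goto out;
	printf("level initialised\n");
out:
	return result;
}
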
diff --git a/arch/x86/kernel/relocate_kernel_32.S b/arch/x86/kernel/relocate_kernel_32.S
index 2064d0aa8d28..41235531b11c 100644
--- a/arch/x86/kernel/relocate_kernel_32.S
+++ b/arch/x86/kernel/relocate_kernel_32.S
@@ -17,7 +17,8 @@
 
 #define PTR(x) (x << 2)
 
-/* control_page + KEXEC_CONTROL_CODE_MAX_SIZE
+/*
+ * control_page + KEXEC_CONTROL_CODE_MAX_SIZE
  * ~ control_page + PAGE_SIZE are used as data storage and stack for
  * jumping back
  */
@@ -76,8 +77,10 @@ relocate_kernel:
 	movl %eax, CP_PA_SWAP_PAGE(%edi)
 	movl %ebx, CP_PA_BACKUP_PAGES_MAP(%edi)
 
-	/* get physical address of control page now */
-	/* this is impossible after page table switch */
+	/*
+	 * get physical address of control page now
+	 * this is impossible after page table switch
+	 */
 	movl PTR(PA_CONTROL_PAGE)(%ebp), %edi
 
 	/* switch to new set of page tables */
@@ -97,7 +100,8 @@ identity_mapped:
 	/* store the start address on the stack */
 	pushl %edx
 
-	/* Set cr0 to a known state:
+	/*
+	 * Set cr0 to a known state:
 	 * - Paging disabled
 	 * - Alignment check disabled
 	 * - Write protect disabled
@@ -113,7 +117,8 @@ identity_mapped:
 	/* clear cr4 if applicable */
 	testl %ecx, %ecx
 	jz 1f
-	/* Set cr4 to a known state:
+	/*
+	 * Set cr4 to a known state:
 	 * Setting everything to zero seems safe.
 	 */
 	xorl %eax, %eax
@@ -132,15 +137,18 @@ identity_mapped:
 	call swap_pages
 	addl $8, %esp
 
-	/* To be certain of avoiding problems with self-modifying code
+	/*
+	 * To be certain of avoiding problems with self-modifying code
 	 * I need to execute a serializing instruction here.
 	 * So I flush the TLB, it's handy, and not processor dependent.
 	 */
 	xorl %eax, %eax
 	movl %eax, %cr3
 
-	/* set all of the registers to known values */
-	/* leave %esp alone */
+	/*
+	 * set all of the registers to known values
+	 * leave %esp alone
+	 */
 
 	testl %esi, %esi
 	jnz 1f
diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
index d32cfb27a479..cfc0d24003dc 100644
--- a/arch/x86/kernel/relocate_kernel_64.S
+++ b/arch/x86/kernel/relocate_kernel_64.S
@@ -24,7 +24,8 @@
 	.code64
 	.globl relocate_kernel
 relocate_kernel:
-	/* %rdi indirection_page
+	/*
+	 * %rdi indirection_page
 	 * %rsi page_list
 	 * %rdx start address
 	 */
@@ -33,8 +34,10 @@ relocate_kernel:
 	pushq $0
 	popfq
 
-	/* get physical address of control page now */
-	/* this is impossible after page table switch */
+	/*
+	 * get physical address of control page now
+	 * this is impossible after page table switch
+	 */
 	movq PTR(PA_CONTROL_PAGE)(%rsi), %r8
 
 	/* get physical address of page table now too */
@@ -55,7 +58,8 @@ identity_mapped:
 	/* store the start address on the stack */
 	pushq %rdx
 
-	/* Set cr0 to a known state:
+	/*
+	 * Set cr0 to a known state:
 	 * - Paging enabled
 	 * - Alignment check disabled
 	 * - Write protect disabled
@@ -68,7 +72,8 @@ identity_mapped:
 	orl $(X86_CR0_PG | X86_CR0_PE), %eax
 	movq %rax, %cr0
 
-	/* Set cr4 to a known state:
+	/*
+	 * Set cr4 to a known state:
 	 * - physical address extension enabled
 	 */
 	movq $X86_CR4_PAE, %rax
@@ -117,7 +122,8 @@ identity_mapped:
 	jmp 0b
 3:
 
-	/* To be certain of avoiding problems with self-modifying code
+	/*
+	 * To be certain of avoiding problems with self-modifying code
 	 * I need to execute a serializing instruction here.
 	 * So I flush the TLB by reloading %cr3 here, it's handy,
 	 * and not processor dependent.
@@ -125,8 +131,10 @@ identity_mapped:
 	movq %cr3, %rax
 	movq %rax, %cr3
 
-	/* set all of the registers to known values */
-	/* leave %rsp alone */
+	/*
+	 * set all of the registers to known values
+	 * leave %rsp alone
+	 */
 
 	xorq %rax, %rax
 	xorq %rbx, %rbx
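
Note: relocate_kernel_32.S and relocate_kernel_64.S both make the same kind of change: pairs of stacked one-line comments are merged into a single block comment. A stand-alone sketch of the merged form, written in plain C rather than the .S sources, with a placeholder value and printf() standing in for the real code around the control page:

#include <stdio.h>

int main(void)
{
	unsigned long control_page = 0x1000;	/* placeholder address */

	/*
	 * get physical address of control page now
	 * this is impossible after page table switch
	 */
	printf("control page at %#lx\n", control_page);
	return 0;
}
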