Diffstat (limited to 'arch/x86_64/kernel/machine_kexec.c')
 arch/x86_64/kernel/machine_kexec.c | 101 +++++++++++++-------------------
 1 file changed, 41 insertions(+), 60 deletions(-)
diff --git a/arch/x86_64/kernel/machine_kexec.c b/arch/x86_64/kernel/machine_kexec.c
index 60d1eff41567..89fab51e20f4 100644
--- a/arch/x86_64/kernel/machine_kexec.c
+++ b/arch/x86_64/kernel/machine_kexec.c
@@ -8,43 +8,26 @@
 
 #include <linux/mm.h>
 #include <linux/kexec.h>
-#include <linux/delay.h>
 #include <linux/string.h>
 #include <linux/reboot.h>
-#include <asm/pda.h>
 #include <asm/pgtable.h>
-#include <asm/pgalloc.h>
 #include <asm/tlbflush.h>
 #include <asm/mmu_context.h>
 #include <asm/io.h>
-#include <asm/apic.h>
-#include <asm/cpufeature.h>
-#include <asm/hw_irq.h>
-
-#define LEVEL0_SIZE (1UL << 12UL)
-#define LEVEL1_SIZE (1UL << 21UL)
-#define LEVEL2_SIZE (1UL << 30UL)
-#define LEVEL3_SIZE (1UL << 39UL)
-#define LEVEL4_SIZE (1UL << 48UL)
-
-#define L0_ATTR (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
-#define L1_ATTR (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_PSE)
-#define L2_ATTR (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
-#define L3_ATTR (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
-
-static void init_level2_page(u64 *level2p, unsigned long addr)
+
+static void init_level2_page(pmd_t *level2p, unsigned long addr)
 {
 	unsigned long end_addr;
 
 	addr &= PAGE_MASK;
-	end_addr = addr + LEVEL2_SIZE;
+	end_addr = addr + PUD_SIZE;
 	while (addr < end_addr) {
-		*(level2p++) = addr | L1_ATTR;
-		addr += LEVEL1_SIZE;
+		set_pmd(level2p++, __pmd(addr | __PAGE_KERNEL_LARGE_EXEC));
+		addr += PMD_SIZE;
 	}
 }
 
-static int init_level3_page(struct kimage *image, u64 *level3p,
+static int init_level3_page(struct kimage *image, pud_t *level3p,
 	unsigned long addr, unsigned long last_addr)
 {
 	unsigned long end_addr;
@@ -52,32 +35,32 @@ static int init_level3_page(struct kimage *image, u64 *level3p,
 
 	result = 0;
 	addr &= PAGE_MASK;
-	end_addr = addr + LEVEL3_SIZE;
+	end_addr = addr + PGDIR_SIZE;
 	while ((addr < last_addr) && (addr < end_addr)) {
 		struct page *page;
-		u64 *level2p;
+		pmd_t *level2p;
 
 		page = kimage_alloc_control_pages(image, 0);
 		if (!page) {
 			result = -ENOMEM;
 			goto out;
 		}
-		level2p = (u64 *)page_address(page);
+		level2p = (pmd_t *)page_address(page);
 		init_level2_page(level2p, addr);
-		*(level3p++) = __pa(level2p) | L2_ATTR;
-		addr += LEVEL2_SIZE;
+		set_pud(level3p++, __pud(__pa(level2p) | _KERNPG_TABLE));
+		addr += PUD_SIZE;
 	}
 	/* clear the unused entries */
 	while (addr < end_addr) {
-		*(level3p++) = 0;
-		addr += LEVEL2_SIZE;
+		pud_clear(level3p++);
+		addr += PUD_SIZE;
 	}
 out:
 	return result;
 }
 
 
-static int init_level4_page(struct kimage *image, u64 *level4p,
+static int init_level4_page(struct kimage *image, pgd_t *level4p,
 	unsigned long addr, unsigned long last_addr)
 {
 	unsigned long end_addr;
@@ -85,28 +68,28 @@ static int init_level4_page(struct kimage *image, u64 *level4p,
 
 	result = 0;
 	addr &= PAGE_MASK;
-	end_addr = addr + LEVEL4_SIZE;
+	end_addr = addr + (PTRS_PER_PGD * PGDIR_SIZE);
 	while ((addr < last_addr) && (addr < end_addr)) {
 		struct page *page;
-		u64 *level3p;
+		pud_t *level3p;
 
 		page = kimage_alloc_control_pages(image, 0);
 		if (!page) {
 			result = -ENOMEM;
 			goto out;
 		}
-		level3p = (u64 *)page_address(page);
+		level3p = (pud_t *)page_address(page);
 		result = init_level3_page(image, level3p, addr, last_addr);
 		if (result) {
 			goto out;
 		}
-		*(level4p++) = __pa(level3p) | L3_ATTR;
-		addr += LEVEL3_SIZE;
+		set_pgd(level4p++, __pgd(__pa(level3p) | _KERNPG_TABLE));
+		addr += PGDIR_SIZE;
 	}
 	/* clear the unused entries */
 	while (addr < end_addr) {
-		*(level4p++) = 0;
-		addr += LEVEL3_SIZE;
+		pgd_clear(level4p++);
+		addr += PGDIR_SIZE;
 	}
 out:
 	return result;
@@ -115,52 +98,50 @@ out:
 
 static int init_pgtable(struct kimage *image, unsigned long start_pgtable)
 {
-	u64 *level4p;
-	level4p = (u64 *)__va(start_pgtable);
+	pgd_t *level4p;
+	level4p = (pgd_t *)__va(start_pgtable);
 	return init_level4_page(image, level4p, 0, end_pfn << PAGE_SHIFT);
 }
 
 static void set_idt(void *newidt, u16 limit)
 {
-	unsigned char curidt[10];
+	struct desc_ptr curidt;
 
 	/* x86-64 supports unaliged loads & stores */
-	(*(u16 *)(curidt)) = limit;
-	(*(u64 *)(curidt +2)) = (unsigned long)(newidt);
+	curidt.size = limit;
+	curidt.address = (unsigned long)newidt;
 
 	__asm__ __volatile__ (
-		"lidt %0\n"
-		: "=m" (curidt)
+		"lidtq %0\n"
+		: : "m" (curidt)
 		);
 };
 
 
 static void set_gdt(void *newgdt, u16 limit)
 {
-	unsigned char curgdt[10];
+	struct desc_ptr curgdt;
 
 	/* x86-64 supports unaligned loads & stores */
-	(*(u16 *)(curgdt)) = limit;
-	(*(u64 *)(curgdt +2)) = (unsigned long)(newgdt);
+	curgdt.size = limit;
+	curgdt.address = (unsigned long)newgdt;
 
 	__asm__ __volatile__ (
-		"lgdt %0\n"
-		: "=m" (curgdt)
+		"lgdtq %0\n"
+		: : "m" (curgdt)
 		);
 };
 
 static void load_segments(void)
 {
 	__asm__ __volatile__ (
-		"\tmovl $"STR(__KERNEL_DS)",%eax\n"
-		"\tmovl %eax,%ds\n"
-		"\tmovl %eax,%es\n"
-		"\tmovl %eax,%ss\n"
-		"\tmovl %eax,%fs\n"
-		"\tmovl %eax,%gs\n"
+		"\tmovl %0,%%ds\n"
+		"\tmovl %0,%%es\n"
+		"\tmovl %0,%%ss\n"
+		"\tmovl %0,%%fs\n"
+		"\tmovl %0,%%gs\n"
+		: : "a" (__KERNEL_DS)
 		);
-#undef STR
-#undef __STR
 }
 
 typedef NORET_TYPE void (*relocate_new_kernel_t)(unsigned long indirection_page,
@@ -178,7 +159,7 @@ int machine_kexec_prepare(struct kimage *image)
 
 	/* Calculate the offsets */
 	start_pgtable = page_to_pfn(image->control_code_page) << PAGE_SHIFT;
-	control_code_buffer = start_pgtable + 4096UL;
+	control_code_buffer = start_pgtable + PAGE_SIZE;
 
 	/* Setup the identity mapped 64bit page table */
 	result = init_pgtable(image, start_pgtable);
@@ -214,7 +195,7 @@ NORET_TYPE void machine_kexec(struct kimage *image)
 	/* Calculate the offsets */
 	page_list = image->head;
 	start_pgtable = page_to_pfn(image->control_code_page) << PAGE_SHIFT;
-	control_code_buffer = start_pgtable + 4096UL;
+	control_code_buffer = start_pgtable + PAGE_SIZE;
 
 	/* Set the low half of the page table to my identity mapped
 	 * page table for kexec. Leave the high half pointing at the
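
As a reading aid only (this is not part of the patch): the removed LEVELn_SIZE constants correspond to the generic page-table macros the patch switches to, where on x86_64 with 4-level paging a pmd entry (large page, as installed by set_pmd above) covers 2 MiB, a pud entry 1 GiB, and a pgd entry 512 GiB. The following standalone sketch recomputes those sizes locally; the shift values are assumptions matching plain x86_64 4-level paging rather than values pulled from kernel headers.

/* reading_aid.c - standalone illustration, not kernel code.
 * Shows the coverage behind PMD_SIZE, PUD_SIZE and PGDIR_SIZE,
 * which replace the old LEVEL1_SIZE/LEVEL2_SIZE/LEVEL3_SIZE defines. */
#include <stdio.h>

#define PMD_SHIFT   21UL   /* one pmd entry (large page): 2 MiB */
#define PUD_SHIFT   30UL   /* one pud entry: 1 GiB */
#define PGDIR_SHIFT 39UL   /* one pgd entry: 512 GiB */

int main(void)
{
	printf("PMD_SIZE   = %lu MiB\n", (1UL << PMD_SHIFT) >> 20);
	printf("PUD_SIZE   = %lu GiB\n", (1UL << PUD_SHIFT) >> 30);
	printf("PGDIR_SIZE = %lu GiB\n", (1UL << PGDIR_SHIFT) >> 30);
	return 0;
}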