author		Yinghai Lu <yinghai@kernel.org>		2013-01-24 15:20:04 -0500
committer	H. Peter Anvin <hpa@linux.intel.com>	2013-01-29 18:26:29 -0500
commit		9ebdc79f7a177d3098b89ba8ef2dd2b235163685 (patch)
tree		6b4f765d5056aa2b52ccb074bc6f9061f3264e42 /arch/x86/kernel
parent		084d1283986a530828b8898f206adf44d5d3146d (diff)
x86, kexec: Replace ident_mapping_init and init_level4_page
ident_mapping_init() currently checks whether the pgd/pud entries are
present for every 2M page it maps; since many 2M pages fall within the
same PUD, it keeps re-checking the same pud entry over and over.
init_level4_page(), on the other hand, never checks for existing
pgd/pud entries at all.

Both home-grown variants can be replaced with the generic
kernel_ident_mapping_init(), using different settings in struct
x86_mapping_info.
Signed-off-by: Yinghai Lu <yinghai@kernel.org>
Link: http://lkml.kernel.org/r/1359058816-7615-24-git-send-email-yinghai@kernel.org
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
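
[Editor's note] For context, the generic helper this patch adopts lives in arch/x86/mm/init_64.c and is driven by struct x86_mapping_info: an alloc_pgt_page() callback, an opaque context pointer handed to that callback, and the pmd_flag applied to the 2M entries. The sketch below is a condensed reconstruction of its shape around this series, not the verbatim upstream body; it shows why the complaint above disappears: each pgd/pud entry is resolved once per covered range, and an already-present entry is reused rather than re-checked for every 2M page.

	/* Fill 2M identity mappings (phys == virt) over [addr, end). */
	static void ident_pmd_init(unsigned long pmd_flag, pmd_t *pmd_page,
				   unsigned long addr, unsigned long end)
	{
		addr &= PMD_MASK;
		for (; addr < end; addr += PMD_SIZE) {
			pmd_t *pmd = pmd_page + pmd_index(addr);

			if (!pmd_present(*pmd))
				set_pmd(pmd, __pmd(addr | pmd_flag));
		}
	}

	static int ident_pud_init(struct x86_mapping_info *info, pud_t *pud_page,
				  unsigned long addr, unsigned long end)
	{
		unsigned long next;

		for (; addr < end; addr = next) {
			pud_t *pud = pud_page + pud_index(addr);
			pmd_t *pmd;

			next = (addr & PUD_MASK) + PUD_SIZE;
			if (next > end)
				next = end;

			/* Each pud entry is visited once per covered range;
			 * a present entry is simply reused. */
			if (pud_present(*pud)) {
				pmd = pmd_offset(pud, 0);
				ident_pmd_init(info->pmd_flag, pmd, addr, next);
				continue;
			}
			pmd = (pmd_t *)info->alloc_pgt_page(info->context);
			if (!pmd)
				return -ENOMEM;
			ident_pmd_init(info->pmd_flag, pmd, addr, next);
			set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
		}

		return 0;
	}

	int kernel_ident_mapping_init(struct x86_mapping_info *info, pgd_t *pgd_page,
				      unsigned long addr, unsigned long end)
	{
		unsigned long next;
		int result;

		for (; addr < end; addr = next) {
			pgd_t *pgd = pgd_page + pgd_index(addr);
			pud_t *pud;

			next = (addr & PGDIR_MASK) + PGDIR_SIZE;
			if (next > end)
				next = end;

			if (pgd_present(*pgd)) {
				pud = pud_offset(pgd, 0);
				result = ident_pud_init(info, pud, addr, next);
				if (result)
					return result;
				continue;
			}

			pud = (pud_t *)info->alloc_pgt_page(info->context);
			if (!pud)
				return -ENOMEM;
			result = ident_pud_init(info, pud, addr, next);
			if (result)
				return result;
			set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE));
		}

		return 0;
	}

The allocator is deliberately left to the caller: kexec feeds it control pages (see alloc_pgt_page() in the diff below), while other callers can allocate from wherever is safe in their context.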
Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--	arch/x86/kernel/machine_kexec_64.c	161
1 file changed, 26 insertions(+), 135 deletions(-)
diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c
index be14ee120c43..d2d7e023a8c8 100644
--- a/arch/x86/kernel/machine_kexec_64.c
+++ b/arch/x86/kernel/machine_kexec_64.c
@@ -16,144 +16,12 @@
 #include <linux/io.h>
 #include <linux/suspend.h>
 
+#include <asm/init.h>
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
 #include <asm/mmu_context.h>
 #include <asm/debugreg.h>
 
-static int init_one_level2_page(struct kimage *image, pgd_t *pgd,
-				unsigned long addr)
-{
-	pud_t *pud;
-	pmd_t *pmd;
-	struct page *page;
-	int result = -ENOMEM;
-
-	addr &= PMD_MASK;
-	pgd += pgd_index(addr);
-	if (!pgd_present(*pgd)) {
-		page = kimage_alloc_control_pages(image, 0);
-		if (!page)
-			goto out;
-		pud = (pud_t *)page_address(page);
-		clear_page(pud);
-		set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE));
-	}
-	pud = pud_offset(pgd, addr);
-	if (!pud_present(*pud)) {
-		page = kimage_alloc_control_pages(image, 0);
-		if (!page)
-			goto out;
-		pmd = (pmd_t *)page_address(page);
-		clear_page(pmd);
-		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
-	}
-	pmd = pmd_offset(pud, addr);
-	if (!pmd_present(*pmd))
-		set_pmd(pmd, __pmd(addr | __PAGE_KERNEL_LARGE_EXEC));
-	result = 0;
-out:
-	return result;
-}
-
-static int ident_mapping_init(struct kimage *image, pgd_t *level4p,
-			      unsigned long mstart, unsigned long mend)
-{
-	int result;
-
-	mstart = round_down(mstart, PMD_SIZE);
-	mend   = round_up(mend - 1, PMD_SIZE);
-
-	while (mstart < mend) {
-		result = init_one_level2_page(image, level4p, mstart);
-		if (result)
-			return result;
-
-		mstart += PMD_SIZE;
-	}
-
-	return 0;
-}
-
-static void init_level2_page(pmd_t *level2p, unsigned long addr)
-{
-	unsigned long end_addr;
-
-	addr &= PAGE_MASK;
-	end_addr = addr + PUD_SIZE;
-	while (addr < end_addr) {
-		set_pmd(level2p++, __pmd(addr | __PAGE_KERNEL_LARGE_EXEC));
-		addr += PMD_SIZE;
-	}
-}
-
-static int init_level3_page(struct kimage *image, pud_t *level3p,
-			    unsigned long addr, unsigned long last_addr)
-{
-	unsigned long end_addr;
-	int result;
-
-	result = 0;
-	addr &= PAGE_MASK;
-	end_addr = addr + PGDIR_SIZE;
-	while ((addr < last_addr) && (addr < end_addr)) {
-		struct page *page;
-		pmd_t *level2p;
-
-		page = kimage_alloc_control_pages(image, 0);
-		if (!page) {
-			result = -ENOMEM;
-			goto out;
-		}
-		level2p = (pmd_t *)page_address(page);
-		init_level2_page(level2p, addr);
-		set_pud(level3p++, __pud(__pa(level2p) | _KERNPG_TABLE));
-		addr += PUD_SIZE;
-	}
-	/* clear the unused entries */
-	while (addr < end_addr) {
-		pud_clear(level3p++);
-		addr += PUD_SIZE;
-	}
-out:
-	return result;
-}
-
-
-static int init_level4_page(struct kimage *image, pgd_t *level4p,
-			    unsigned long addr, unsigned long last_addr)
-{
-	unsigned long end_addr;
-	int result;
-
-	result = 0;
-	addr &= PAGE_MASK;
-	end_addr = addr + (PTRS_PER_PGD * PGDIR_SIZE);
-	while ((addr < last_addr) && (addr < end_addr)) {
-		struct page *page;
-		pud_t *level3p;
-
-		page = kimage_alloc_control_pages(image, 0);
-		if (!page) {
-			result = -ENOMEM;
-			goto out;
-		}
-		level3p = (pud_t *)page_address(page);
-		result = init_level3_page(image, level3p, addr, last_addr);
-		if (result)
-			goto out;
-		set_pgd(level4p++, __pgd(__pa(level3p) | _KERNPG_TABLE));
-		addr += PGDIR_SIZE;
-	}
-	/* clear the unused entries */
-	while (addr < end_addr) {
-		pgd_clear(level4p++);
-		addr += PGDIR_SIZE;
-	}
-out:
-	return result;
-}
-
 static void free_transition_pgtable(struct kimage *image)
 {
 	free_page((unsigned long)image->arch.pud);
@@ -203,15 +71,37 @@ err:
 	return result;
 }
 
+static void *alloc_pgt_page(void *data)
+{
+	struct kimage *image = (struct kimage *)data;
+	struct page *page;
+	void *p = NULL;
+
+	page = kimage_alloc_control_pages(image, 0);
+	if (page) {
+		p = page_address(page);
+		clear_page(p);
+	}
+
+	return p;
+}
+
 static int init_pgtable(struct kimage *image, unsigned long start_pgtable)
 {
+	struct x86_mapping_info info = {
+		.alloc_pgt_page	= alloc_pgt_page,
+		.context	= image,
+		.pmd_flag	= __PAGE_KERNEL_LARGE_EXEC,
+	};
 	unsigned long mstart, mend;
 	pgd_t *level4p;
 	int result;
 	int i;
 
 	level4p = (pgd_t *)__va(start_pgtable);
-	result = init_level4_page(image, level4p, 0, max_pfn << PAGE_SHIFT);
+	clear_page(level4p);
+	result = kernel_ident_mapping_init(&info, level4p,
+					   0, max_pfn << PAGE_SHIFT);
 	if (result)
 		return result;
 
@@ -225,7 +115,8 @@ static int init_pgtable(struct kimage *image, unsigned long start_pgtable)
 		mstart = image->segment[i].mem;
 		mend   = mstart + image->segment[i].memsz;
 
-		result = ident_mapping_init(image, level4p, mstart, mend);
+		result = kernel_ident_mapping_init(&info,
+						   level4p, mstart, mend);
 
 		if (result)
 			return result;