author		Boris Ostrovsky <boris.ostrovsky@oracle.com>	2014-07-09 13:18:18 -0400
committer	H. Peter Anvin <hpa@linux.intel.com>	2014-07-14 16:47:32 -0400
commit		8762e5092828c4dc0f49da5a47a644c670df77f3 (patch)
tree		a54805cac2758322f4e997497267137c046e8574 /arch/x86/kernel
parent		e0463e42d7b73654f39f6a155f82f0b72ad5258a (diff)
x86/espfix/xen: Fix allocation of pages for paravirt page tables
init_espfix_ap() is currently off by one level when informing the hypervisor
that the allocated pages will be used for the ministacks' page tables.

The most immediate effect of this on a PV guest is that if
'stack_page = __get_free_page()' returns a non-zeroed-out page, the hypervisor
will refuse to use it for a page table (which it shouldn't be anyway). This
results in warnings from both Xen and Linux.

More importantly, a subsequent write to that page (again, by a PV guest) is
likely to result in a fatal page fault.
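In short (a fragment distilled from the patch below, using its identifiers;
not a standalone, buildable unit): each paravirt_alloc_*() call must name the
level of the table that the freshly allocated page will *contain*, and the
ministack page itself is plain data, so it gets no such call at all.

  /* Page that will hold PMD entries: register it as a PMD table. */
  pmd_p = (pmd_t *)__get_free_page(PGALLOC_GFP);
  paravirt_alloc_pmd(&init_mm, __pa(pmd_p) >> PAGE_SHIFT);

  /* Page that will hold PTE entries: register it as a PTE table. */
  pte_p = (pte_t *)__get_free_page(PGALLOC_GFP);
  paravirt_alloc_pte(&init_mm, __pa(pte_p) >> PAGE_SHIFT);

  /*
   * The ministack page is mapped *by* a PTE but is not itself a page
   * table, so it is not reported to the hypervisor at all (the old
   * code wrongly called paravirt_alloc_pte() on it).
   */
  stack_page = (void *)__get_free_page(GFP_KERNEL);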
Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Link: http://lkml.kernel.org/r/1404926298-5565-1-git-send-email-boris.ostrovsky@oracle.com
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--	arch/x86/kernel/espfix_64.c | 5 ++---
1 file changed, 2 insertions(+), 3 deletions(-)
diff --git a/arch/x86/kernel/espfix_64.c b/arch/x86/kernel/espfix_64.c
index 6afbb16e9b79..94d857fb1033 100644
--- a/arch/x86/kernel/espfix_64.c
+++ b/arch/x86/kernel/espfix_64.c
@@ -175,7 +175,7 @@ void init_espfix_ap(void)
 	if (!pud_present(pud)) {
 		pmd_p = (pmd_t *)__get_free_page(PGALLOC_GFP);
 		pud = __pud(__pa(pmd_p) | (PGTABLE_PROT & ptemask));
-		paravirt_alloc_pud(&init_mm, __pa(pmd_p) >> PAGE_SHIFT);
+		paravirt_alloc_pmd(&init_mm, __pa(pmd_p) >> PAGE_SHIFT);
 		for (n = 0; n < ESPFIX_PUD_CLONES; n++)
 			set_pud(&pud_p[n], pud);
 	}
@@ -185,7 +185,7 @@ void init_espfix_ap(void)
 	if (!pmd_present(pmd)) {
 		pte_p = (pte_t *)__get_free_page(PGALLOC_GFP);
 		pmd = __pmd(__pa(pte_p) | (PGTABLE_PROT & ptemask));
-		paravirt_alloc_pmd(&init_mm, __pa(pte_p) >> PAGE_SHIFT);
+		paravirt_alloc_pte(&init_mm, __pa(pte_p) >> PAGE_SHIFT);
 		for (n = 0; n < ESPFIX_PMD_CLONES; n++)
 			set_pmd(&pmd_p[n], pmd);
 	}
@@ -193,7 +193,6 @@ void init_espfix_ap(void)
 	pte_p = pte_offset_kernel(&pmd, addr);
 	stack_page = (void *)__get_free_page(GFP_KERNEL);
 	pte = __pte(__pa(stack_page) | (__PAGE_KERNEL_RO & ptemask));
-	paravirt_alloc_pte(&init_mm, __pa(stack_page) >> PAGE_SHIFT);
 	for (n = 0; n < ESPFIX_PTE_CLONES; n++)
 		set_pte(&pte_p[n*PTE_STRIDE], pte);
 