author	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>	2014-01-03 14:08:39 -0500
committer	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>	2014-01-06 10:44:04 -0500
commit	b621e157ba48fb7d36945405de68c5fa25e7b73c (patch)
tree	410dbd6445045602471d4b47f032c6260e91e5fb /arch/x86
parent	32df75cd148b43e007848ddbfdb1ea25535114cb (diff)
xen/mmu: Cleanup xen_pagetable_p2m_copy a bit.
Stefano noticed that the code runs only under 64-bit, so the comments about 32-bit are pointless. We also rework the check on xen_revector_p2m_tree's return value: if it returns the same value, either because it could not allocate a swath of space to put the new P2M in or because it had already been called once, we return early from the function.

Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Acked-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
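The cleanup follows the common kernel style of inverting the failure check and returning early, which un-indents the main body of the function. Below is a minimal standalone sketch of that pattern only; the names (revector, copy_step) are hypothetical stand-ins, not the kernel code itself:

	#include <stdio.h>
	#include <stdlib.h>

	/* Hypothetical stand-in for xen_revector_p2m_tree(): returns a new
	 * buffer, or the old pointer when it could not allocate or was
	 * already called. */
	static char *revector(char *old)
	{
		char *fresh = malloc(16);
		return fresh ? fresh : old;
	}

	static void copy_step(char **list)
	{
		char *new_list = revector(*list);

		/* No memory or already called: bail out early instead of
		 * wrapping the whole body in an if-block. */
		if (!new_list || new_list == *list)
			return;

		/* ... main body runs un-indented here ... */
		*list = new_list;
	}

	int main(void)
	{
		char buf[16];
		char *list = buf;

		copy_step(&list);
		printf("revectored: %s\n", list != buf ? "yes" : "no");
		return 0;
	}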
Diffstat (limited to 'arch/x86')
-rw-r--r--	arch/x86/xen/mmu.c	40
1 file changed, 20 insertions(+), 20 deletions(-)
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index c140efffe37e..9d74249542c5 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1209,29 +1209,29 @@ static void __init xen_pagetable_p2m_copy(void)
 
 	size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long));
 
-	/* On 32-bit, we get zero so this never gets executed. */
 	new_mfn_list = xen_revector_p2m_tree();
-	if (new_mfn_list && new_mfn_list != xen_start_info->mfn_list) {
-		/* using __ka address and sticking INVALID_P2M_ENTRY! */
-		memset((void *)xen_start_info->mfn_list, 0xff, size);
-
-		/* We should be in __ka space. */
-		BUG_ON(xen_start_info->mfn_list < __START_KERNEL_map);
-		addr = xen_start_info->mfn_list;
-		/* We roundup to the PMD, which means that if anybody at this stage is
-		 * using the __ka address of xen_start_info or xen_start_info->shared_info
-		 * they are in going to crash. Fortunatly we have already revectored
-		 * in xen_setup_kernel_pagetable and in xen_setup_shared_info. */
-		size = roundup(size, PMD_SIZE);
-		xen_cleanhighmap(addr, addr + size);
-
-		size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long));
-		memblock_free(__pa(xen_start_info->mfn_list), size);
-		/* And revector! Bye bye old array */
-		xen_start_info->mfn_list = new_mfn_list;
-	} else
+	/* No memory or already called. */
+	if (!new_mfn_list || new_mfn_list == xen_start_info->mfn_list)
 		return;
 
+	/* using __ka address and sticking INVALID_P2M_ENTRY! */
+	memset((void *)xen_start_info->mfn_list, 0xff, size);
+
+	/* We should be in __ka space. */
+	BUG_ON(xen_start_info->mfn_list < __START_KERNEL_map);
+	addr = xen_start_info->mfn_list;
+	/* We roundup to the PMD, which means that if anybody at this stage is
+	 * using the __ka address of xen_start_info or xen_start_info->shared_info
+	 * they are in going to crash. Fortunatly we have already revectored
+	 * in xen_setup_kernel_pagetable and in xen_setup_shared_info. */
+	size = roundup(size, PMD_SIZE);
+	xen_cleanhighmap(addr, addr + size);
+
+	size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long));
+	memblock_free(__pa(xen_start_info->mfn_list), size);
+	/* And revector! Bye bye old array */
+	xen_start_info->mfn_list = new_mfn_list;
+
 	/* At this stage, cleanup_highmap has already cleaned __ka space
 	 * from _brk_limit way up to the max_pfn_mapped (which is the end of
 	 * the ramdisk). We continue on, erasing PMD entries that point to page