author		Jan Beulich <JBeulich@novell.com>		2011-08-16 10:07:41 -0400
committer	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>	2011-08-17 10:26:48 -0400
commit		ccbcdf7cf1b5f6c6db30d84095b9c6c53043af55 (patch)
tree		d1c37475f699fcf1ef48f829f379f7cecda357e1 /arch/x86/xen
parent		30eefc95841ce51c3281876f0b954dd1d3c0bd5f (diff)
xen/x86: replace order-based range checking of M2P table by linear one
The order-based approach is not only less efficient (requiring a shift and a compare, typical generated code looking like this

        mov     eax, [machine_to_phys_order]
        mov     ecx, eax
        shr     ebx, cl
        test    ebx, ebx
        jnz     ...

whereas a direct check requires just a compare, like in

        cmp     ebx, [machine_to_phys_nr]
        jae     ...

), but also slightly dangerous in the 32-on-64 case - the element address calculation can wrap if the next power of two boundary is sufficiently far away from the actual upper limit of the table, and hence can result in user space addresses being accessed (with it being unknown what may actually be mapped there).

Additionally, the elimination of the mistaken use of fls() here (should have been __fls()) fixes a latent issue on x86-64 that would trigger if the code was run on a system with memory extending beyond the 44-bit boundary.

CC: stable@kernel.org
Signed-off-by: Jan Beulich <jbeulich@novell.com>
[v1: Based on Jeremy's feedback]
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
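For illustration only, a minimal stand-alone C sketch of the two range checks contrasted above; the helper names mfn_in_m2p_old() and mfn_in_m2p_new() are made up here and are not kernel functions:

        #include <stdbool.h>

        static unsigned int machine_to_phys_order;  /* old: log2 of the table size, rounded up to a power of two */
        static unsigned long machine_to_phys_nr;    /* new: actual number of table entries */

        /* Old, order-based check: a shift plus a test, with the table size
         * effectively rounded up to the next power of two. */
        static bool mfn_in_m2p_old(unsigned long mfn)
        {
                return (mfn >> machine_to_phys_order) == 0;
        }

        /* New, linear check: a single compare against the real entry count. */
        static bool mfn_in_m2p_new(unsigned long mfn)
        {
                return mfn < machine_to_phys_nr;
        }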
Diffstat (limited to 'arch/x86/xen')
-rw-r--r--	arch/x86/xen/enlighten.c	4
-rw-r--r--	arch/x86/xen/mmu.c	12
2 files changed, 10 insertions, 6 deletions
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 974a528458a0..b960429d5b65 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -77,8 +77,8 @@ EXPORT_SYMBOL_GPL(xen_domain_type);
 
 unsigned long *machine_to_phys_mapping = (void *)MACH2PHYS_VIRT_START;
 EXPORT_SYMBOL(machine_to_phys_mapping);
-unsigned int machine_to_phys_order;
-EXPORT_SYMBOL(machine_to_phys_order);
+unsigned long machine_to_phys_nr;
+EXPORT_SYMBOL(machine_to_phys_nr);
 
 struct start_info *xen_start_info;
 EXPORT_SYMBOL_GPL(xen_start_info);
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index f987bde77c49..24abc1f50dc5 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1713,15 +1713,19 @@ static void __init xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
 void __init xen_setup_machphys_mapping(void)
 {
         struct xen_machphys_mapping mapping;
-        unsigned long machine_to_phys_nr_ents;
 
         if (HYPERVISOR_memory_op(XENMEM_machphys_mapping, &mapping) == 0) {
                 machine_to_phys_mapping = (unsigned long *)mapping.v_start;
-                machine_to_phys_nr_ents = mapping.max_mfn + 1;
+                machine_to_phys_nr = mapping.max_mfn + 1;
         } else {
-                machine_to_phys_nr_ents = MACH2PHYS_NR_ENTRIES;
+                machine_to_phys_nr = MACH2PHYS_NR_ENTRIES;
         }
-        machine_to_phys_order = fls(machine_to_phys_nr_ents - 1);
+#ifdef CONFIG_X86_32
+        if ((machine_to_phys_mapping + machine_to_phys_nr)
+            < machine_to_phys_mapping)
+                machine_to_phys_nr = (unsigned long *)NULL
+                                     - machine_to_phys_mapping;
+#endif
 }
 
 #ifdef CONFIG_X86_64
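The CONFIG_X86_32 hunk above clamps the entry count so that indexing the table can never wrap past the top of the 32-bit address space. Roughly, as a stand-alone sketch (clamp_m2p_nr() is a hypothetical helper, not a kernel function):

        /* If base + nr would wrap past the top of the address space, shrink
         * nr to the number of unsigned longs that still fit between base and
         * the wrap point ((unsigned long *)NULL - base, in pointer-arithmetic
         * units), mirroring the clamp added in the hunk above. */
        static unsigned long clamp_m2p_nr(unsigned long *base, unsigned long nr)
        {
                if (base + nr < base)
                        nr = (unsigned long *)0 - base;
                return nr;
        }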