aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorStefano Stabellini <stefano.stabellini@eu.citrix.com>2010-10-26 12:28:33 -0400
committerStefano Stabellini <stefano.stabellini@eu.citrix.com>2010-10-26 15:33:15 -0400
commitea5b8f73933e34d2b47a65284c46d26d49e7edb9 (patch)
tree0c7a4b44dc46d2dc66ac6822fde3bf9f12f14bbc
parent0e058e527784a9a23f7ed7a73ffafebb53a889da (diff)
xen: initialize cpu masks for pv guests in xen_smp_init
PV guests don't have ACPI and need the cpu masks to be set correctly as early as possible, so we call xen_fill_possible_map from xen_smp_init. The initial domain, on the other hand, does support ACPI, so in that case we skip xen_fill_possible_map and rely on ACPI instead. However, Xen might limit the number of cpus usable by the domain, so we filter those masks during smp initialization using the VCPUOP_is_up hypercall. It is important that the filtering is done before xen_setup_vcpu_info_placement. Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
-rw-r--r--arch/x86/xen/smp.c24
1 file changed, 22 insertions(+), 2 deletions(-)
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 138676781dd4..834dfeb54e31 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -28,6 +28,7 @@
28#include <asm/xen/interface.h> 28#include <asm/xen/interface.h>
29#include <asm/xen/hypercall.h> 29#include <asm/xen/hypercall.h>
30 30
31#include <xen/xen.h>
31#include <xen/page.h> 32#include <xen/page.h>
32#include <xen/events.h> 33#include <xen/events.h>
33 34
@@ -156,6 +157,25 @@ static void __init xen_fill_possible_map(void)
156{ 157{
157 int i, rc; 158 int i, rc;
158 159
160 if (xen_initial_domain())
161 return;
162
163 for (i = 0; i < nr_cpu_ids; i++) {
164 rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
165 if (rc >= 0) {
166 num_processors++;
167 set_cpu_possible(i, true);
168 }
169 }
170}
171
172static void __init xen_filter_cpu_maps(void)
173{
174 int i, rc;
175
176 if (!xen_initial_domain())
177 return;
178
159 num_processors = 0; 179 num_processors = 0;
160 disabled_cpus = 0; 180 disabled_cpus = 0;
161 for (i = 0; i < nr_cpu_ids; i++) { 181 for (i = 0; i < nr_cpu_ids; i++) {
@@ -179,6 +199,7 @@ static void __init xen_smp_prepare_boot_cpu(void)
179 old memory can be recycled */ 199 old memory can be recycled */
180 make_lowmem_page_readwrite(xen_initial_gdt); 200 make_lowmem_page_readwrite(xen_initial_gdt);
181 201
202 xen_filter_cpu_maps();
182 xen_setup_vcpu_info_placement(); 203 xen_setup_vcpu_info_placement();
183} 204}
184 205
@@ -195,8 +216,6 @@ static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
195 if (xen_smp_intr_init(0)) 216 if (xen_smp_intr_init(0))
196 BUG(); 217 BUG();
197 218
198 xen_fill_possible_map();
199
200 if (!alloc_cpumask_var(&xen_cpu_initialized_map, GFP_KERNEL)) 219 if (!alloc_cpumask_var(&xen_cpu_initialized_map, GFP_KERNEL))
201 panic("could not allocate xen_cpu_initialized_map\n"); 220 panic("could not allocate xen_cpu_initialized_map\n");
202 221
@@ -487,5 +506,6 @@ static const struct smp_ops xen_smp_ops __initdata = {
487void __init xen_smp_init(void) 506void __init xen_smp_init(void)
488{ 507{
489 smp_ops = xen_smp_ops; 508 smp_ops = xen_smp_ops;
509 xen_fill_possible_map();
490 xen_init_spinlocks(); 510 xen_init_spinlocks();
491} 511}