Diffstat (limited to 'arch/x86/xen/smp.c')
-rw-r--r--  arch/x86/xen/smp.c  90
1 file changed, 80 insertions(+), 10 deletions(-)
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 25f232b18a82..b4533a86d7e4 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -28,6 +28,7 @@
 #include <asm/xen/interface.h>
 #include <asm/xen/hypercall.h>
 
+#include <xen/xen.h>
 #include <xen/page.h>
 #include <xen/events.h>
 
@@ -45,18 +46,17 @@ static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id);
 static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id);
 
 /*
- * Reschedule call back. Nothing to do,
- * all the work is done automatically when
- * we return from the interrupt.
+ * Reschedule call back.
  */
 static irqreturn_t xen_reschedule_interrupt(int irq, void *dev_id)
 {
 	inc_irq_stat(irq_resched_count);
+	scheduler_ipi();
 
 	return IRQ_HANDLED;
 }
 
-static __cpuinit void cpu_bringup(void)
+static void __cpuinit cpu_bringup(void)
 {
 	int cpu = smp_processor_id();
 
@@ -84,7 +84,7 @@ static __cpuinit void cpu_bringup(void)
 	wmb();			/* make sure everything is out */
 }
 
-static __cpuinit void cpu_bringup_and_idle(void)
+static void __cpuinit cpu_bringup_and_idle(void)
 {
 	cpu_bringup();
 	cpu_idle();
@@ -156,6 +156,9 @@ static void __init xen_fill_possible_map(void)
 {
 	int i, rc;
 
+	if (xen_initial_domain())
+		return;
+
 	for (i = 0; i < nr_cpu_ids; i++) {
 		rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
 		if (rc >= 0) {
@@ -165,6 +168,27 @@ static void __init xen_fill_possible_map(void)
 	}
 }
 
+static void __init xen_filter_cpu_maps(void)
+{
+	int i, rc;
+
+	if (!xen_initial_domain())
+		return;
+
+	num_processors = 0;
+	disabled_cpus = 0;
+	for (i = 0; i < nr_cpu_ids; i++) {
+		rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
+		if (rc >= 0) {
+			num_processors++;
+			set_cpu_possible(i, true);
+		} else {
+			set_cpu_possible(i, false);
+			set_cpu_present(i, false);
+		}
+	}
+}
+
 static void __init xen_smp_prepare_boot_cpu(void)
 {
 	BUG_ON(smp_processor_id() != 0);
@@ -174,17 +198,25 @@ static void __init xen_smp_prepare_boot_cpu(void)
 	   old memory can be recycled */
 	make_lowmem_page_readwrite(xen_initial_gdt);
 
+	xen_filter_cpu_maps();
 	xen_setup_vcpu_info_placement();
 }
 
 static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
 {
 	unsigned cpu;
+	unsigned int i;
 
 	xen_init_lock_cpu(0);
 
 	smp_store_cpu_info(0);
 	cpu_data(0).x86_max_cores = 1;
+
+	for_each_possible_cpu(i) {
+		zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
+		zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
+		zalloc_cpumask_var(&per_cpu(cpu_llc_shared_map, i), GFP_KERNEL);
+	}
 	set_cpu_sibling_map(0);
 
 	if (xen_smp_intr_init(0))
@@ -216,7 +248,7 @@ static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
 	}
 }
 
-static __cpuinit int
+static int __cpuinit
 cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
 {
 	struct vcpu_guest_context *ctxt;
@@ -400,9 +432,9 @@ static void stop_self(void *v)
 	BUG();
 }
 
-static void xen_smp_send_stop(void)
+static void xen_stop_other_cpus(int wait)
 {
-	smp_call_function(stop_self, NULL, 0);
+	smp_call_function(stop_self, NULL, wait);
 }
 
 static void xen_smp_send_reschedule(int cpu)
@@ -460,7 +492,7 @@ static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
-static const struct smp_ops xen_smp_ops __initdata = {
+static const struct smp_ops xen_smp_ops __initconst = {
 	.smp_prepare_boot_cpu = xen_smp_prepare_boot_cpu,
 	.smp_prepare_cpus = xen_smp_prepare_cpus,
 	.smp_cpus_done = xen_smp_cpus_done,
@@ -470,7 +502,7 @@ static const struct smp_ops xen_smp_ops __initdata = {
 	.cpu_disable = xen_cpu_disable,
 	.play_dead = xen_play_dead,
 
-	.smp_send_stop = xen_smp_send_stop,
+	.stop_other_cpus = xen_stop_other_cpus,
 	.smp_send_reschedule = xen_smp_send_reschedule,
 
 	.send_call_func_ipi = xen_smp_send_call_function_ipi,
@@ -483,3 +515,41 @@ void __init xen_smp_init(void)
 	xen_fill_possible_map();
 	xen_init_spinlocks();
 }
+
+static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus)
+{
+	native_smp_prepare_cpus(max_cpus);
+	WARN_ON(xen_smp_intr_init(0));
+
+	if (!xen_have_vector_callback)
+		return;
+	xen_init_lock_cpu(0);
+	xen_init_spinlocks();
+}
+
+static int __cpuinit xen_hvm_cpu_up(unsigned int cpu)
+{
+	int rc;
+	rc = native_cpu_up(cpu);
+	WARN_ON (xen_smp_intr_init(cpu));
+	return rc;
+}
+
+static void xen_hvm_cpu_die(unsigned int cpu)
+{
+	unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu), NULL);
+	unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL);
+	unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL);
+	unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu), NULL);
+	native_cpu_die(cpu);
+}
+
+void __init xen_hvm_smp_init(void)
+{
+	smp_ops.smp_prepare_cpus = xen_hvm_smp_prepare_cpus;
+	smp_ops.smp_send_reschedule = xen_smp_send_reschedule;
+	smp_ops.cpu_up = xen_hvm_cpu_up;
+	smp_ops.cpu_die = xen_hvm_cpu_die;
+	smp_ops.send_call_func_ipi = xen_smp_send_call_function_ipi;
+	smp_ops.send_call_func_single_ipi = xen_smp_send_call_function_single_ipi;
+}