Diffstat (limited to 'arch/x86/xen/smp.c')
-rw-r--r--	arch/x86/xen/smp.c	27	+++++++++++++++------------
1 file changed, 15 insertions(+), 12 deletions(-)
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index acd9b6705e02..c44e2069c7c7 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -33,7 +33,7 @@
 #include "xen-ops.h"
 #include "mmu.h"
 
-cpumask_t xen_cpu_initialized_map;
+cpumask_var_t xen_cpu_initialized_map;
 
 static DEFINE_PER_CPU(int, resched_irq);
 static DEFINE_PER_CPU(int, callfunc_irq);
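[note] The key type change is cpumask_var_t. Simplified from <linux/cpumask.h> (not introduced by this patch), it expands differently depending on configuration:

	#ifdef CONFIG_CPUMASK_OFFSTACK
	typedef struct cpumask *cpumask_var_t;		/* must be allocated before use */
	#else
	typedef struct cpumask cpumask_var_t[1];	/* storage is inline; allocation is a no-op */
	#endif

Either way the variable decays to a struct cpumask *, so it can be passed straight to the cpumask_* helpers; the allocation added in xen_smp_prepare_cpus() below covers the off-stack case.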
@@ -158,7 +158,7 @@ static void __init xen_fill_possible_map(void)
 {
 	int i, rc;
 
-	for (i = 0; i < NR_CPUS; i++) {
+	for (i = 0; i < nr_cpu_ids; i++) {
 		rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
 		if (rc >= 0) {
 			num_processors++;
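[note] nr_cpu_ids is the runtime upper bound on CPU numbers (one past the highest possible CPU), usually far below the compile-time NR_CPUS ceiling. A minimal sketch of the bounded-scan idiom this hunk adopts; probe_slot() is a hypothetical stand-in for the hypercall:

	static void probe_slot(unsigned int i);	/* hypothetical per-slot probe */

	static void example_scan(void)
	{
		unsigned int i;

		/* nr_cpu_ids <= NR_CPUS: on a 4-cpu machine built with
		 * NR_CPUS=4096 this runs a handful of iterations, not 4096. */
		for (i = 0; i < nr_cpu_ids; i++)
			probe_slot(i);
	}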
@@ -192,11 +192,14 @@ static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
 	if (xen_smp_intr_init(0))
 		BUG();
 
-	xen_cpu_initialized_map = cpumask_of_cpu(0);
+	if (!alloc_cpumask_var(&xen_cpu_initialized_map, GFP_KERNEL))
+		panic("could not allocate xen_cpu_initialized_map\n");
+
+	cpumask_copy(xen_cpu_initialized_map, cpumask_of(0));
 
 	/* Restrict the possible_map according to max_cpus. */
 	while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) {
-		for (cpu = NR_CPUS - 1; !cpu_possible(cpu); cpu--)
+		for (cpu = nr_cpu_ids - 1; !cpu_possible(cpu); cpu--)
 			continue;
 		cpu_clear(cpu, cpu_possible_map);
 	}
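[note] The single assignment from cpumask_of_cpu(0) becomes an allocate-then-seed sequence. A minimal sketch of the same lifecycle outside smp.c, using a hypothetical example_mask (smp.c panics on failure because boot cannot continue; ordinary code would return -ENOMEM, and a teardown path would free the mask):

	#include <linux/cpumask.h>
	#include <linux/gfp.h>
	#include <linux/init.h>

	static cpumask_var_t example_mask;	/* hypothetical */

	static int __init example_setup(void)
	{
		if (!alloc_cpumask_var(&example_mask, GFP_KERNEL))
			return -ENOMEM;	/* can only fail with CPUMASK_OFFSTACK=y */

		/* start with just the boot cpu set */
		cpumask_copy(example_mask, cpumask_of(0));
		return 0;
	}

	static void example_teardown(void)
	{
		free_cpumask_var(example_mask);
	}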
@@ -221,7 +224,7 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
 	struct vcpu_guest_context *ctxt;
 	struct desc_struct *gdt;
 
-	if (cpu_test_and_set(cpu, xen_cpu_initialized_map))
+	if (cpumask_test_and_set_cpu(cpu, xen_cpu_initialized_map))
 		return 0;
 
 	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
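[note] cpumask_test_and_set_cpu() is the atomic test-and-set operation on the pointer-based mask, directly replacing cpu_test_and_set(). It returns the bit's previous value, which is what makes the early-return "initialize each cpu once" guard above work. Sketched with the hypothetical example_mask from the earlier note:

	static int example_init_cpu(unsigned int cpu)
	{
		/* First caller for a given cpu sees 0 and proceeds;
		 * every later caller sees 1 and returns immediately. */
		if (cpumask_test_and_set_cpu(cpu, example_mask))
			return 0;

		/* ... one-time per-cpu setup would go here ... */
		return 0;
	}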
@@ -408,24 +411,23 @@ static void xen_smp_send_reschedule(int cpu)
 	xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR);
 }
 
-static void xen_send_IPI_mask(cpumask_t mask, enum ipi_vector vector)
+static void xen_send_IPI_mask(const struct cpumask *mask,
+			      enum ipi_vector vector)
 {
 	unsigned cpu;
 
-	cpus_and(mask, mask, cpu_online_map);
-
-	for_each_cpu_mask_nr(cpu, mask)
+	for_each_cpu_and(cpu, mask, cpu_online_mask)
 		xen_send_IPI_one(cpu, vector);
 }
 
-static void xen_smp_send_call_function_ipi(cpumask_t mask)
+static void xen_smp_send_call_function_ipi(const struct cpumask *mask)
 {
 	int cpu;
 
 	xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR);
 
 	/* Make sure other vcpus get a chance to run if they need to. */
-	for_each_cpu_mask_nr(cpu, mask) {
+	for_each_cpu(cpu, mask) {
 		if (xen_vcpu_stolen(cpu)) {
 			HYPERVISOR_sched_op(SCHEDOP_yield, 0);
 			break;
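[note] This hunk carries the point of the conversion. Passing a cpumask_t by value copies NR_CPUS bits (512 bytes at NR_CPUS=4096) onto the stack on every IPI, and the old code then had to run cpus_and() on that private copy to mask out offline cpus. for_each_cpu_and() iterates the intersection of two masks without writing to either, so a one-word const pointer suffices. A condensed before/after sketch, assuming the surrounding Xen helpers:

	/* before: by-value parameter, destructive intersection on the copy */
	static void send_ipi_old(cpumask_t mask, enum ipi_vector vector)
	{
		unsigned cpu;

		cpus_and(mask, mask, cpu_online_map);	/* clobbers the local copy */
		for_each_cpu_mask_nr(cpu, mask)
			xen_send_IPI_one(cpu, vector);
	}

	/* after: one word passed, no copy, no writes to either mask */
	static void send_ipi_new(const struct cpumask *mask, enum ipi_vector vector)
	{
		unsigned cpu;

		for_each_cpu_and(cpu, mask, cpu_online_mask)
			xen_send_IPI_one(cpu, vector);
	}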
@@ -435,7 +437,8 @@ static void xen_smp_send_call_function_ipi(cpumask_t mask)
 
 static void xen_smp_send_call_function_single_ipi(int cpu)
 {
-	xen_send_IPI_mask(cpumask_of_cpu(cpu), XEN_CALL_FUNCTION_SINGLE_VECTOR);
+	xen_send_IPI_mask(cpumask_of(cpu),
+			  XEN_CALL_FUNCTION_SINGLE_VECTOR);
 }
 
 static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id)
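[note] cpumask_of(cpu) returns a const struct cpumask * into preexisting constant storage, where the old cpumask_of_cpu(cpu) constructed a whole cpumask_t value just to pass it by value. With the pointer-taking xen_send_IPI_mask() above, targeting one cpu no longer materializes a temporary mask. A caller-side sketch:

	static void example_kick(unsigned int cpu)
	{
		/* no struct copy: cpumask_of() points at existing storage */
		xen_send_IPI_mask(cpumask_of(cpu), XEN_CALL_FUNCTION_SINGLE_VECTOR);
	}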
