author		Jens Axboe <jens.axboe@oracle.com>	2008-06-26 05:21:54 -0400
committer	Jens Axboe <jens.axboe@oracle.com>	2008-06-26 05:21:54 -0400
commit		3b16cf874861436725c43ba0b68bdd799297be7c
tree		8e48647e3dce5dde6917f260f93c4b9f19945c55 /arch/x86/xen
parent		3d4422332711ef48ef0f132f1fcbfcbd56c7f3d1
x86: convert to generic helpers for IPI function calls
This converts x86, x86-64, and xen to use the new helpers for
smp_call_function() and friends, and adds support for
smp_call_function_single().
Acked-by: Ingo Molnar <mingo@elte.hu>
Acked-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
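
[Editorial note: for context, a minimal caller-side sketch of the generic API these hooks back. It mirrors the four-argument smp_call_function_mask(mask, func, info, wait) form visible in the mmu.c hunk below; count_up() and run_on_others() are illustrative names, not part of the patch.]

	/*
	 * Illustrative sketch only: kernel/smp.c queues the request, then
	 * kicks the target CPUs through the arch hooks this patch wires up
	 * for Xen (send_call_func_ipi / send_call_func_single_ipi).
	 */
	static void count_up(void *info)
	{
		atomic_inc((atomic_t *)info);	/* runs on each CPU in the mask */
	}

	static void run_on_others(cpumask_t mask)
	{
		atomic_t hits = ATOMIC_INIT(0);

		/* wait == 1: return only after every CPU in mask ran count_up() */
		smp_call_function_mask(mask, count_up, &hits, 1);
	}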
Diffstat (limited to 'arch/x86/xen')
-rw-r--r--	arch/x86/xen/enlighten.c	  4
-rw-r--r--	arch/x86/xen/mmu.c		  2
-rw-r--r--	arch/x86/xen/smp.c		133
-rw-r--r--	arch/x86/xen/xen-ops.h		  9
4 files changed, 52 insertions(+), 96 deletions(-)
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index f09c1c69c37a..8e317782fe37 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -1108,7 +1108,9 @@ static const struct smp_ops xen_smp_ops __initdata = {
 
 	.smp_send_stop = xen_smp_send_stop,
 	.smp_send_reschedule = xen_smp_send_reschedule,
-	.smp_call_function_mask = xen_smp_call_function_mask,
+
+	.send_call_func_ipi = xen_smp_send_call_function_ipi,
+	.send_call_func_single_ipi = xen_smp_send_call_function_single_ipi,
 };
 #endif	/* CONFIG_SMP */
 
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index df40bf74ea75..5c01590380bc 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -558,7 +558,7 @@ static void drop_mm_ref(struct mm_struct *mm)
 	}
 
 	if (!cpus_empty(mask))
-		xen_smp_call_function_mask(mask, drop_other_mm_ref, mm, 1);
+		smp_call_function_mask(mask, drop_other_mm_ref, mm, 1);
 }
 #else
 static void drop_mm_ref(struct mm_struct *mm)
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 94e69000f982..b3786e749b8e 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -36,27 +36,14 @@
 #include "mmu.h"
 
 static cpumask_t xen_cpu_initialized_map;
-static DEFINE_PER_CPU(int, resched_irq) = -1;
-static DEFINE_PER_CPU(int, callfunc_irq) = -1;
-static DEFINE_PER_CPU(int, debug_irq) = -1;
-
-/*
- * Structure and data for smp_call_function(). This is designed to minimise
- * static memory requirements. It also looks cleaner.
- */
-static DEFINE_SPINLOCK(call_lock);
 
-struct call_data_struct {
-	void (*func) (void *info);
-	void *info;
-	atomic_t started;
-	atomic_t finished;
-	int wait;
-};
+static DEFINE_PER_CPU(int, resched_irq);
+static DEFINE_PER_CPU(int, callfunc_irq);
+static DEFINE_PER_CPU(int, callfuncsingle_irq);
+static DEFINE_PER_CPU(int, debug_irq) = -1;
 
 static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id);
-
-static struct call_data_struct *call_data;
+static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id);
 
 /*
  * Reschedule call back. Nothing to do,
@@ -122,6 +109,17 @@ static int xen_smp_intr_init(unsigned int cpu)
 		goto fail;
 	per_cpu(debug_irq, cpu) = rc;
 
+	callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu);
+	rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_SINGLE_VECTOR,
+				    cpu,
+				    xen_call_function_single_interrupt,
+				    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
+				    callfunc_name,
+				    NULL);
+	if (rc < 0)
+		goto fail;
+	per_cpu(callfuncsingle_irq, cpu) = rc;
+
 	return 0;
 
 fail:
@@ -131,6 +129,9 @@ static int xen_smp_intr_init(unsigned int cpu)
 		unbind_from_irqhandler(per_cpu(callfunc_irq, cpu), NULL);
 	if (per_cpu(debug_irq, cpu) >= 0)
 		unbind_from_irqhandler(per_cpu(debug_irq, cpu), NULL);
+	if (per_cpu(callfuncsingle_irq, cpu) >= 0)
+		unbind_from_irqhandler(per_cpu(callfuncsingle_irq, cpu), NULL);
+
 	return rc;
 }
 
@@ -338,7 +339,6 @@ void xen_smp_send_reschedule(int cpu)
 	xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR);
 }
 
-
 static void xen_send_IPI_mask(cpumask_t mask, enum ipi_vector vector)
 {
 	unsigned cpu;
@@ -349,83 +349,42 @@ static void xen_send_IPI_mask(cpumask_t mask, enum ipi_vector vector)
 		xen_send_IPI_one(cpu, vector);
 }
 
+void xen_smp_send_call_function_ipi(cpumask_t mask)
+{
+	int cpu;
+
+	xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR);
+
+	/* Make sure other vcpus get a chance to run if they need to. */
+	for_each_cpu_mask(cpu, mask) {
+		if (xen_vcpu_stolen(cpu)) {
+			HYPERVISOR_sched_op(SCHEDOP_yield, 0);
+			break;
+		}
+	}
+}
+
+void xen_smp_send_call_function_single_ipi(int cpu)
+{
+	xen_send_IPI_mask(cpumask_of_cpu(cpu), XEN_CALL_FUNCTION_SINGLE_VECTOR);
+}
+
 static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id)
 {
-	void (*func) (void *info) = call_data->func;
-	void *info = call_data->info;
-	int wait = call_data->wait;
-
-	/*
-	 * Notify initiating CPU that I've grabbed the data and am
-	 * about to execute the function
-	 */
-	mb();
-	atomic_inc(&call_data->started);
-	/*
-	 * At this point the info structure may be out of scope unless wait==1
-	 */
 	irq_enter();
-	(*func)(info);
+	generic_smp_call_function_interrupt();
 	__get_cpu_var(irq_stat).irq_call_count++;
 	irq_exit();
 
-	if (wait) {
-		mb();		/* commit everything before setting finished */
-		atomic_inc(&call_data->finished);
-	}
-
 	return IRQ_HANDLED;
 }
 
-int xen_smp_call_function_mask(cpumask_t mask, void (*func)(void *),
-			       void *info, int wait)
+static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id)
 {
-	struct call_data_struct data;
-	int cpus, cpu;
-	bool yield;
-
-	/* Holding any lock stops cpus from going down. */
-	spin_lock(&call_lock);
-
-	cpu_clear(smp_processor_id(), mask);
-
-	cpus = cpus_weight(mask);
-	if (!cpus) {
-		spin_unlock(&call_lock);
-		return 0;
-	}
-
-	/* Can deadlock when called with interrupts disabled */
-	WARN_ON(irqs_disabled());
-
-	data.func = func;
-	data.info = info;
-	atomic_set(&data.started, 0);
-	data.wait = wait;
-	if (wait)
-		atomic_set(&data.finished, 0);
-
-	call_data = &data;
-	mb();			/* write everything before IPI */
-
-	/* Send a message to other CPUs and wait for them to respond */
-	xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR);
-
-	/* Make sure other vcpus get a chance to run if they need to. */
-	yield = false;
-	for_each_cpu_mask(cpu, mask)
-		if (xen_vcpu_stolen(cpu))
-			yield = true;
-
-	if (yield)
-		HYPERVISOR_sched_op(SCHEDOP_yield, 0);
-
-	/* Wait for response */
-	while (atomic_read(&data.started) != cpus ||
-	       (wait && atomic_read(&data.finished) != cpus))
-		cpu_relax();
-
-	spin_unlock(&call_lock);
+	irq_enter();
+	generic_smp_call_function_single_interrupt();
+	__get_cpu_var(irq_stat).irq_call_count++;
+	irq_exit();
 
-	return 0;
+	return IRQ_HANDLED;
 }
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index f1063ae08037..a636ab5e1341 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -46,13 +46,8 @@ void xen_smp_cpus_done(unsigned int max_cpus);
 
 void xen_smp_send_stop(void);
 void xen_smp_send_reschedule(int cpu);
-int xen_smp_call_function (void (*func) (void *info), void *info, int nonatomic,
-			   int wait);
-int xen_smp_call_function_single(int cpu, void (*func) (void *info), void *info,
-				 int nonatomic, int wait);
-
-int xen_smp_call_function_mask(cpumask_t mask, void (*func)(void *),
-			       void *info, int wait);
+void xen_smp_send_call_function_ipi(cpumask_t mask);
+void xen_smp_send_call_function_single_ipi(int cpu);
 
 
 /* Declare an asm function, along with symbols needed to make it
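
[Editorial note: not shown in this diffstat, which is limited to arch/x86/xen: the native x86 half of the same patch routes the generic kernel/smp.c IPI hooks through the smp_ops fields that the enlighten.c hunk above fills in for Xen. An approximate sketch of that half (from include/asm-x86/smp.h of this series):]

	/* Approximate; the exact wrappers live in the x86 half of this patch. */
	static inline void arch_send_call_function_single_ipi(int cpu)
	{
		smp_ops.send_call_func_single_ipi(cpu);
	}

	static inline void arch_send_call_function_ipi(cpumask_t mask)
	{
		smp_ops.send_call_func_ipi(mask);
	}

Generic code invokes these arch_* helpers; under Xen they land in xen_smp_send_call_function_ipi() / xen_smp_send_call_function_single_ipi() above.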