about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorJeremy Fitzhardinge <jeremy@xensource.com>2007-10-16 14:51:30 -0400
committerJeremy Fitzhardinge <jeremy@goop.org>2007-10-16 14:51:30 -0400
commitf0d733942750c1ee6358c3a4a1a5d7ba73b7122f (patch)
tree32d0802b60078b6a2f43ce19d9019033ada6485d
parentd626a1f1cbbdfac90c529216e40a4fa2a22ecc3d (diff)
xen: yield to IPI target if necessary
When sending a call-function IPI to a vcpu, yield if the vcpu isn't running.

Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
-rw-r--r--arch/x86/xen/smp.c14
-rw-r--r--arch/x86/xen/time.c6
-rw-r--r--arch/x86/xen/xen-ops.h2
3 files changed, 18 insertions, 4 deletions
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 557b8e24706a..865953e6f341 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -360,7 +360,8 @@ int xen_smp_call_function_mask(cpumask_t mask, void (*func)(void *),
360 void *info, int wait) 360 void *info, int wait)
361{ 361{
362 struct call_data_struct data; 362 struct call_data_struct data;
363 int cpus; 363 int cpus, cpu;
364 bool yield;
364 365
365 /* Holding any lock stops cpus from going down. */ 366 /* Holding any lock stops cpus from going down. */
366 spin_lock(&call_lock); 367 spin_lock(&call_lock);
@@ -389,9 +390,14 @@ int xen_smp_call_function_mask(cpumask_t mask, void (*func)(void *),
389 /* Send a message to other CPUs and wait for them to respond */ 390 /* Send a message to other CPUs and wait for them to respond */
390 xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR); 391 xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR);
391 392
392 /* Make sure other vcpus get a chance to run. 393 /* Make sure other vcpus get a chance to run if they need to. */
393 XXX too severe? Maybe we should check the other CPU's states? */ 394 yield = false;
394 HYPERVISOR_sched_op(SCHEDOP_yield, 0); 395 for_each_cpu_mask(cpu, mask)
396 if (xen_vcpu_stolen(cpu))
397 yield = true;
398
399 if (yield)
400 HYPERVISOR_sched_op(SCHEDOP_yield, 0);
395 401
396 /* Wait for response */ 402 /* Wait for response */
397 while (atomic_read(&data.started) != cpus || 403 while (atomic_read(&data.started) != cpus ||
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
index dfd6db69ead5..d083ff5ef088 100644
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@ -105,6 +105,12 @@ static void get_runstate_snapshot(struct vcpu_runstate_info *res)
105 } while (get64(&state->state_entry_time) != state_time); 105 } while (get64(&state->state_entry_time) != state_time);
106} 106}
107 107
108/* return true when a vcpu could run but has no real cpu to run on */
109bool xen_vcpu_stolen(int vcpu)
110{
111 return per_cpu(runstate, vcpu).state == RUNSTATE_runnable;
112}
113
108static void setup_runstate_info(int cpu) 114static void setup_runstate_info(int cpu)
109{ 115{
110 struct vcpu_register_runstate_memory_area area; 116 struct vcpu_register_runstate_memory_area area;
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index b5697bae52d0..3847eed0bb09 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -27,6 +27,8 @@ unsigned long xen_get_wallclock(void);
27int xen_set_wallclock(unsigned long time); 27int xen_set_wallclock(unsigned long time);
28unsigned long long xen_sched_clock(void); 28unsigned long long xen_sched_clock(void);
29 29
30bool xen_vcpu_stolen(int vcpu);
31
30void xen_mark_init_mm_pinned(void); 32void xen_mark_init_mm_pinned(void);
31 33
32void __init xen_fill_possible_map(void); 34void __init xen_fill_possible_map(void);