author	Gerald Schaefer <gerald.schaefer@de.ibm.com>	2010-02-26 16:37:40 -0500
committer	Martin Schwidefsky <sky@mschwide.boeblingen.de.ibm.com>	2010-02-26 16:37:31 -0500
commit	59b697874529f5c3cbcaf5816b3d6c584af521e8 (patch)
tree	c395952c2a0bb9a3027a37a30dd37cc93b1a7c3f	/arch/s390/include/asm/smp.h
parent	8387c736fcbaec17890b8d075ee4f4623518b54a (diff)
[S390] spinlock: check virtual cpu running status
This patch introduces a new function that checks the running status of a cpu in a hypervisor. This status is not virtualized, so the check is only correct if running in an LPAR. On acquiring a spinlock, if the cpu holding the lock is scheduled by the hypervisor, we do a busy wait on the lock. If it is not scheduled, we yield over to that cpu.

Signed-off-by: Gerald Schaefer <gerald.schaefer@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
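For context, the busy-wait-vs-yield decision described above might look roughly like the sketch below. This is not the code changed by this patch (the spinlock changes live in arch/s390/lib/spinlock.c); the lock word layout (owner stored as ~cpu, 0 meaning free) and the yield_to_cpu() helper are illustrative assumptions, used only to show how smp_vcpu_scheduled() feeds the decision.

/*
 * Minimal sketch of a lock-wait loop using smp_vcpu_scheduled(); the
 * owner encoding and yield_to_cpu() are placeholders, not real kernel
 * interfaces.
 */
static void spin_lock_wait_sketch(arch_spinlock_t *lp)
{
	unsigned int cpu = ~smp_processor_id();	/* value stored as owner */
	unsigned int owner;

	while (1) {
		owner = lp->owner_cpu;
		if (owner == 0) {
			/* Lock looks free: try to take it atomically. */
			if (cmpxchg(&lp->owner_cpu, 0, cpu) == 0)
				return;
			continue;
		}
		/*
		 * If the holder's cpu is backed by a physical cpu,
		 * spinning is cheap, so just retry.  If the hypervisor
		 * has preempted it, hand our timeslice to the holder
		 * instead of burning cycles.  The sense_running status
		 * is not virtualized, so this is only meaningful in an
		 * LPAR.
		 */
		if (!smp_vcpu_scheduled(~owner))
			yield_to_cpu(~owner);	/* placeholder yield call */
	}
}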
Diffstat (limited to 'arch/s390/include/asm/smp.h')
-rw-r--r--	arch/s390/include/asm/smp.h	24
1 file changed, 24 insertions(+), 0 deletions(-)
diff --git a/arch/s390/include/asm/smp.h b/arch/s390/include/asm/smp.h
index c2d0e638f892..edc03cb9cd79 100644
--- a/arch/s390/include/asm/smp.h
+++ b/arch/s390/include/asm/smp.h
@@ -36,6 +36,28 @@ extern void smp_switch_to_cpu(void (*)(void *), void *, unsigned long sp,
 			      int from, int to);
 extern void smp_restart_cpu(void);
 
+/*
+ * returns 1 if (virtual) cpu is scheduled
+ * returns 0 otherwise
+ */
+static inline int smp_vcpu_scheduled(int cpu)
+{
+	u32 status;
+
+	switch (sigp_ps(&status, 0, cpu, sigp_sense_running)) {
+	case sigp_status_stored:
+		/* Check for running status */
+		if (status & 0x400)
+			return 0;
+		break;
+	case sigp_not_operational:
+		return 0;
+	default:
+		break;
+	}
+	return 1;
+}
+
 #else /* CONFIG_SMP */
 
 static inline void smp_switch_to_ipl_cpu(void (*func)(void *), void *data)
@@ -43,6 +65,8 @@ static inline void smp_switch_to_ipl_cpu(void (*func)(void *), void *data)
 	func(data);
 }
 
+#define smp_vcpu_scheduled	(1)
+
 #endif /* CONFIG_SMP */
 
 #ifdef CONFIG_HOTPLUG_CPU