diff options
author | Gerald Schaefer <gerald.schaefer@de.ibm.com> | 2010-02-26 16:37:40 -0500 |
---|---|---|
committer | Martin Schwidefsky <sky@mschwide.boeblingen.de.ibm.com> | 2010-02-26 16:37:31 -0500 |
commit | 59b697874529f5c3cbcaf5816b3d6c584af521e8 (patch) | |
tree | c395952c2a0bb9a3027a37a30dd37cc93b1a7c3f /arch/s390 | |
parent | 8387c736fcbaec17890b8d075ee4f4623518b54a (diff) |
[S390] spinlock: check virtual cpu running status
This patch introduces a new function that checks the running status
of a cpu in a hypervisor. This status is not virtualized, so the check
is only correct if running in an LPAR. On acquiring a spinlock, if the
cpu holding the lock is scheduled by the hypervisor, we do a busy wait
on the lock. If it is not scheduled, we yield to that cpu.
Signed-off-by: Gerald Schaefer <gerald.schaefer@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Diffstat (limited to 'arch/s390')
-rw-r--r-- | arch/s390/include/asm/sigp.h | 40 | ||||
-rw-r--r-- | arch/s390/include/asm/smp.h | 24 | ||||
-rw-r--r-- | arch/s390/lib/spinlock.c | 53 |
3 files changed, 80 insertions, 37 deletions
diff --git a/arch/s390/include/asm/sigp.h b/arch/s390/include/asm/sigp.h index 8aa46ce4229c..e3bffd4e2d66 100644 --- a/arch/s390/include/asm/sigp.h +++ b/arch/s390/include/asm/sigp.h | |||
@@ -25,29 +25,28 @@ static inline int cpu_logical_map(int cpu) | |||
25 | } | 25 | } |
26 | 26 | ||
27 | enum { | 27 | enum { |
28 | sigp_unassigned=0x0, | 28 | sigp_sense = 1, |
29 | sigp_sense, | 29 | sigp_external_call = 2, |
30 | sigp_external_call, | 30 | sigp_emergency_signal = 3, |
31 | sigp_emergency_signal, | 31 | sigp_start = 4, |
32 | sigp_start, | 32 | sigp_stop = 5, |
33 | sigp_stop, | 33 | sigp_restart = 6, |
34 | sigp_restart, | 34 | sigp_stop_and_store_status = 9, |
35 | sigp_unassigned1, | 35 | sigp_initial_cpu_reset = 11, |
36 | sigp_unassigned2, | 36 | sigp_cpu_reset = 12, |
37 | sigp_stop_and_store_status, | 37 | sigp_set_prefix = 13, |
38 | sigp_unassigned3, | 38 | sigp_store_status_at_address = 14, |
39 | sigp_initial_cpu_reset, | 39 | sigp_store_extended_status_at_address = 15, |
40 | sigp_cpu_reset, | 40 | sigp_set_architecture = 18, |
41 | sigp_set_prefix, | 41 | sigp_conditional_emergency_signal = 19, |
42 | sigp_store_status_at_address, | 42 | sigp_sense_running = 21, |
43 | sigp_store_extended_status_at_address | ||
44 | }; | 43 | }; |
45 | 44 | ||
46 | enum { | 45 | enum { |
47 | sigp_order_code_accepted=0, | 46 | sigp_order_code_accepted = 0, |
48 | sigp_status_stored, | 47 | sigp_status_stored = 1, |
49 | sigp_busy, | 48 | sigp_busy = 2, |
50 | sigp_not_operational | 49 | sigp_not_operational = 3, |
51 | }; | 50 | }; |
52 | 51 | ||
53 | /* | 52 | /* |
@@ -57,7 +56,6 @@ enum { | |||
57 | ec_schedule = 0, | 56 | ec_schedule = 0, |
58 | ec_call_function, | 57 | ec_call_function, |
59 | ec_call_function_single, | 58 | ec_call_function_single, |
60 | ec_bit_last | ||
61 | }; | 59 | }; |
62 | 60 | ||
63 | /* | 61 | /* |
diff --git a/arch/s390/include/asm/smp.h b/arch/s390/include/asm/smp.h index c2d0e638f892..edc03cb9cd79 100644 --- a/arch/s390/include/asm/smp.h +++ b/arch/s390/include/asm/smp.h | |||
@@ -36,6 +36,28 @@ extern void smp_switch_to_cpu(void (*)(void *), void *, unsigned long sp, | |||
36 | int from, int to); | 36 | int from, int to); |
37 | extern void smp_restart_cpu(void); | 37 | extern void smp_restart_cpu(void); |
38 | 38 | ||
39 | /* | ||
40 | * returns 1 if (virtual) cpu is scheduled | ||
41 | * returns 0 otherwise | ||
42 | */ | ||
43 | static inline int smp_vcpu_scheduled(int cpu) | ||
44 | { | ||
45 | u32 status; | ||
46 | |||
47 | switch (sigp_ps(&status, 0, cpu, sigp_sense_running)) { | ||
48 | case sigp_status_stored: | ||
49 | /* Check for running status */ | ||
50 | if (status & 0x400) | ||
51 | return 0; | ||
52 | break; | ||
53 | case sigp_not_operational: | ||
54 | return 0; | ||
55 | default: | ||
56 | break; | ||
57 | } | ||
58 | return 1; | ||
59 | } | ||
60 | |||
39 | #else /* CONFIG_SMP */ | 61 | #else /* CONFIG_SMP */ |
40 | 62 | ||
41 | static inline void smp_switch_to_ipl_cpu(void (*func)(void *), void *data) | 63 | static inline void smp_switch_to_ipl_cpu(void (*func)(void *), void *data) |
@@ -43,6 +65,8 @@ static inline void smp_switch_to_ipl_cpu(void (*func)(void *), void *data) | |||
43 | func(data); | 65 | func(data); |
44 | } | 66 | } |
45 | 67 | ||
68 | #define smp_vcpu_scheduled (1) | ||
69 | |||
46 | #endif /* CONFIG_SMP */ | 70 | #endif /* CONFIG_SMP */ |
47 | 71 | ||
48 | #ifdef CONFIG_HOTPLUG_CPU | 72 | #ifdef CONFIG_HOTPLUG_CPU |
diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c index cff327f109a8..91754ffb9203 100644 --- a/arch/s390/lib/spinlock.c +++ b/arch/s390/lib/spinlock.c | |||
@@ -43,16 +43,24 @@ void arch_spin_lock_wait(arch_spinlock_t *lp) | |||
43 | { | 43 | { |
44 | int count = spin_retry; | 44 | int count = spin_retry; |
45 | unsigned int cpu = ~smp_processor_id(); | 45 | unsigned int cpu = ~smp_processor_id(); |
46 | unsigned int owner; | ||
46 | 47 | ||
47 | while (1) { | 48 | while (1) { |
48 | if (count-- <= 0) { | 49 | owner = lp->owner_cpu; |
49 | unsigned int owner = lp->owner_cpu; | 50 | if (!owner || smp_vcpu_scheduled(~owner)) { |
50 | if (owner != 0) | 51 | for (count = spin_retry; count > 0; count--) { |
51 | _raw_yield_cpu(~owner); | 52 | if (arch_spin_is_locked(lp)) |
52 | count = spin_retry; | 53 | continue; |
54 | if (_raw_compare_and_swap(&lp->owner_cpu, 0, | ||
55 | cpu) == 0) | ||
56 | return; | ||
57 | } | ||
58 | if (MACHINE_IS_LPAR) | ||
59 | continue; | ||
53 | } | 60 | } |
54 | if (arch_spin_is_locked(lp)) | 61 | owner = lp->owner_cpu; |
55 | continue; | 62 | if (owner) |
63 | _raw_yield_cpu(~owner); | ||
56 | if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0) | 64 | if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0) |
57 | return; | 65 | return; |
58 | } | 66 | } |
@@ -63,17 +71,27 @@ void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags) | |||
63 | { | 71 | { |
64 | int count = spin_retry; | 72 | int count = spin_retry; |
65 | unsigned int cpu = ~smp_processor_id(); | 73 | unsigned int cpu = ~smp_processor_id(); |
74 | unsigned int owner; | ||
66 | 75 | ||
67 | local_irq_restore(flags); | 76 | local_irq_restore(flags); |
68 | while (1) { | 77 | while (1) { |
69 | if (count-- <= 0) { | 78 | owner = lp->owner_cpu; |
70 | unsigned int owner = lp->owner_cpu; | 79 | if (!owner || smp_vcpu_scheduled(~owner)) { |
71 | if (owner != 0) | 80 | for (count = spin_retry; count > 0; count--) { |
72 | _raw_yield_cpu(~owner); | 81 | if (arch_spin_is_locked(lp)) |
73 | count = spin_retry; | 82 | continue; |
83 | local_irq_disable(); | ||
84 | if (_raw_compare_and_swap(&lp->owner_cpu, 0, | ||
85 | cpu) == 0) | ||
86 | return; | ||
87 | local_irq_restore(flags); | ||
88 | } | ||
89 | if (MACHINE_IS_LPAR) | ||
90 | continue; | ||
74 | } | 91 | } |
75 | if (arch_spin_is_locked(lp)) | 92 | owner = lp->owner_cpu; |
76 | continue; | 93 | if (owner) |
94 | _raw_yield_cpu(~owner); | ||
77 | local_irq_disable(); | 95 | local_irq_disable(); |
78 | if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0) | 96 | if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0) |
79 | return; | 97 | return; |
@@ -100,8 +118,11 @@ EXPORT_SYMBOL(arch_spin_trylock_retry); | |||
100 | void arch_spin_relax(arch_spinlock_t *lock) | 118 | void arch_spin_relax(arch_spinlock_t *lock) |
101 | { | 119 | { |
102 | unsigned int cpu = lock->owner_cpu; | 120 | unsigned int cpu = lock->owner_cpu; |
103 | if (cpu != 0) | 121 | if (cpu != 0) { |
104 | _raw_yield_cpu(~cpu); | 122 | if (MACHINE_IS_VM || MACHINE_IS_KVM || |
123 | !smp_vcpu_scheduled(~cpu)) | ||
124 | _raw_yield_cpu(~cpu); | ||
125 | } | ||
105 | } | 126 | } |
106 | EXPORT_SYMBOL(arch_spin_relax); | 127 | EXPORT_SYMBOL(arch_spin_relax); |
107 | 128 | ||