author		Jeremy Fitzhardinge <jeremy@goop.org>	2013-08-09 10:21:59 -0400
committer	H. Peter Anvin <hpa@linux.intel.com>	2013-08-09 10:54:03 -0400
commit		1ed7bf5f5227169b661c619636f754b98001ec30 (patch)
tree		401ebe5d78455719bb3cf65faaa6d2ad1ec38fad /arch/x86
parent		96f853eaa889c7a22718d275b0df7bebdbd6780e (diff)
xen, pvticketlock: Allow interrupts to be enabled while blocking
If interrupts were enabled when taking the spinlock, we can leave them
enabled while blocking to get the lock.

If we can enable interrupts while waiting for the lock to become
available, and we take an interrupt before entering the poll, and the
handler takes a spinlock which ends up going into the slow state
(invalidating the per-cpu "lock" and "want" values), then when the
interrupt handler returns the event channel will remain pending so the
poll will return immediately, causing it to return out to the main
spinlock loop.

Signed-off-by: Jeremy Fitzhardinge <jeremy@goop.org>
Link: http://lkml.kernel.org/r/1376058122-8248-12-git-send-email-raghavendra.kt@linux.vnet.ibm.com
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: Raghavendra K T <raghavendra.kt@linux.vnet.ibm.com>
Acked-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
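To make the "remains pending" argument concrete, here is a minimal userspace sketch (an illustration only, not kernel code: evtchn_pending, unlock_kick() and poll_irq() are stand-ins for the Xen event channel, the unlocker's kick and xen_poll_irq()). Because the event stays pending rather than being consumed by delivery, a kick that has already been delivered by the time the poll is entered simply makes the poll return at once, and the ticket loop retries:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool evtchn_pending;      /* stand-in for the Xen event channel */

static void unlock_kick(void)           /* stand-in for the unlocker's kick */
{
        atomic_store(&evtchn_pending, true);
}

static void poll_irq(void)              /* stand-in for xen_poll_irq() */
{
        /* Returns as soon as the event is pending; if it is already
         * pending when we get here, there is no wait at all.  (This
         * sketch spins; the real xen_poll_irq() blocks the VCPU.) */
        while (!atomic_load(&evtchn_pending))
                ;
}

int main(void)
{
        unlock_kick();  /* an "interrupt" handled before the poll leaves the event pending */
        poll_irq();     /* so the poll returns immediately (a spurious wakeup) */
        puts("spurious wakeup -> retry with freshly set up (lock, want)");
        return 0;
}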
Diffstat (limited to 'arch/x86')
-rw-r--r--	arch/x86/xen/spinlock.c | 46
1 file changed, 40 insertions(+), 6 deletions(-)
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index 546112ed463f..0438b9324a72 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -142,7 +142,20 @@ static void xen_lock_spinning(struct arch_spinlock *lock, __ticket_t want)
 	 * partially setup state.
 	 */
 	local_irq_save(flags);
-
+	/*
+	 * We don't really care if we're overwriting some other
+	 * (lock,want) pair, as that would mean that we're currently
+	 * in an interrupt context, and the outer context had
+	 * interrupts enabled. That has already kicked the VCPU out
+	 * of xen_poll_irq(), so it will just return spuriously and
+	 * retry with newly setup (lock,want).
+	 *
+	 * The ordering protocol on this is that the "lock" pointer
+	 * may only be set non-NULL if the "want" ticket is correct.
+	 * If we're updating "want", we must first clear "lock".
+	 */
+	w->lock = NULL;
+	smp_wmb();
 	w->want = want;
 	smp_wmb();
 	w->lock = lock;
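The ordering comment introduced in this hunk is the write half of a small publication protocol for the per-cpu (lock, want) slot. A minimal sketch of the same order of operations, assuming C11 atomics with explicit fences standing in for smp_wmb() (struct waiting_slot and publish() are illustrative names, not the kernel's types):

#include <stdatomic.h>
#include <stddef.h>

struct waiting_slot {
        _Atomic(void *) lock;           /* NULL means "ignore this slot" */
        _Atomic(unsigned int) want;
};

static void publish(struct waiting_slot *w, void *lock, unsigned int want)
{
        /* Invalidate the slot first, so a concurrent kicker can never
         * pair a stale "want" with the new "lock" (or vice versa). */
        atomic_store_explicit(&w->lock, NULL, memory_order_relaxed);
        atomic_thread_fence(memory_order_release);      /* smp_wmb() */

        /* Now it is safe to change the ticket we are waiting for. */
        atomic_store_explicit(&w->want, want, memory_order_relaxed);
        atomic_thread_fence(memory_order_release);      /* smp_wmb() */

        /* Publish last: a kicker that sees this lock pointer also sees
         * the "want" written before it. */
        atomic_store_explicit(&w->lock, lock, memory_order_relaxed);
}

Clearing the pointer first is what makes a nested interrupt's overwrite safe: a kicker can only ever pair a non-NULL lock pointer with the want value written before it was published.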
@@ -157,24 +170,43 @@ static void xen_lock_spinning(struct arch_spinlock *lock, __ticket_t want)
 	/* Only check lock once pending cleared */
 	barrier();
 
-	/* Mark entry to slowpath before doing the pickup test to make
-	   sure we don't deadlock with an unlocker. */
+	/*
+	 * Mark entry to slowpath before doing the pickup test to make
+	 * sure we don't deadlock with an unlocker.
+	 */
 	__ticket_enter_slowpath(lock);
 
-	/* check again make sure it didn't become free while
-	   we weren't looking */
+	/*
+	 * check again make sure it didn't become free while
+	 * we weren't looking
+	 */
 	if (ACCESS_ONCE(lock->tickets.head) == want) {
 		add_stats(TAKEN_SLOW_PICKUP, 1);
 		goto out;
 	}
+
+	/* Allow interrupts while blocked */
+	local_irq_restore(flags);
+
+	/*
+	 * If an interrupt happens here, it will leave the wakeup irq
+	 * pending, which will cause xen_poll_irq() to return
+	 * immediately.
+	 */
+
 	/* Block until irq becomes pending (or perhaps a spurious wakeup) */
 	xen_poll_irq(irq);
 	add_stats(TAKEN_SLOW_SPURIOUS, !xen_test_irq_pending(irq));
+
+	local_irq_save(flags);
+
 	kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
 out:
 	cpumask_clear_cpu(cpu, &waiting_cpus);
 	w->lock = NULL;
+
 	local_irq_restore(flags);
+
 	spin_time_accum_blocked(start);
 }
 PV_CALLEE_SAVE_REGS_THUNK(xen_lock_spinning);
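The interrupt handling added in this hunk follows a simple discipline: the per-cpu waiting slot is only set up and torn down with interrupts off, and interrupts are re-enabled only across the blocking poll itself, and only to whatever state the caller originally had. A rough userspace analogue, assuming POSIX signal masking as a stand-in for local_irq_save()/local_irq_restore() (slowpath_window() and blocking_poll are illustrative names):

#include <signal.h>

/* blocking_poll stands in for xen_poll_irq(): a wakeup that is already
 * pending when it is called makes it return immediately. */
static void slowpath_window(void (*blocking_poll)(void))
{
        sigset_t all, saved;

        sigfillset(&all);
        sigprocmask(SIG_BLOCK, &all, &saved);   /* local_irq_save(flags)    */

        /* ... publish the per-cpu (lock, want) slot, enter the slowpath,
         *     re-check the ticket head ... */

        sigprocmask(SIG_SETMASK, &saved, NULL); /* local_irq_restore(flags) */
        blocking_poll();                        /* xen_poll_irq(irq)        */
        sigprocmask(SIG_BLOCK, &all, NULL);     /* local_irq_save(flags)    */

        /* ... account the wakeup, clear the per-cpu slot ... */

        sigprocmask(SIG_SETMASK, &saved, NULL); /* local_irq_restore(flags) */
}

If the caller had interrupts disabled to begin with, restoring the saved state across the poll keeps them disabled, so the previous behaviour is preserved in that case.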
@@ -188,7 +220,9 @@ static void xen_unlock_kick(struct arch_spinlock *lock, __ticket_t next)
 	for_each_cpu(cpu, &waiting_cpus) {
 		const struct xen_lock_waiting *w = &per_cpu(lock_waiting, cpu);
 
-		if (w->lock == lock && w->want == next) {
+		/* Make sure we read lock before want */
+		if (ACCESS_ONCE(w->lock) == lock &&
+		    ACCESS_ONCE(w->want) == next) {
 			add_stats(RELEASED_SLOW_KICKED, 1);
 			xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR);
 			break;
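The kick side above is the matching read half of the publication protocol sketched after the first hunk: "lock" must be observed before "want". In the kernel code that ordering rests on ACCESS_ONCE() keeping the two reads distinct and in program order; a portable C11 rendering of the same read side, reusing the illustrative struct waiting_slot and publish() from the earlier sketch (the explicit acquire fence is an assumption standing in for that read ordering):

/* Reader half: check "lock" first, then "want". The acquire fence pairs
 * with the release fences in publish(), so a reader that sees the new
 * lock pointer also sees the "want" written before it was published. */
static int should_kick(const struct waiting_slot *w, void *lock,
                       unsigned int next)
{
        if (atomic_load_explicit(&w->lock, memory_order_relaxed) != lock)
                return 0;
        atomic_thread_fence(memory_order_acquire);      /* read lock before want */
        return atomic_load_explicit(&w->want, memory_order_relaxed) == next;
}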