author		Jeremy Fitzhardinge <jeremy@goop.org>	2008-08-20 20:02:18 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-08-21 07:52:57 -0400
commit		168d2f464ab9860f0d1e66cf1f9684973222f1c6 (patch)
tree		c3a4199cf2aa4c6322b7860c431fd5bfdc4220ef
parent		7708ad64a24a674f7905aa7a5099a50f055debec (diff)
xen: save previous spinlock when blocking
A spinlock can be interrupted while spinning, so make sure we preserve
the previous lock of interest if we're taking a lock from within an
interrupt handler.

We also need to deal with the case where the blocking path gets
interrupted between testing to see if the lock is free and actually
blocking.  If we get interrupted there and end up in the state where
the lock is free but the irq isn't pending, then we'll block
indefinitely in the hypervisor.  The fix is to make sure that any
nested lock-takers always leave the irq pending if there's any chance
the outer lock became free.

Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Acked-by: Jan Beulich <jbeulich@novell.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
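To make the race concrete, here is a simplified illustration of the two paths
involved.  It is not code from the patch: outer_path(), nested_path(), irq,
lock and other_lock are hypothetical stand-ins, while xen_clear_irq_pending(),
xen_spin_trylock(), xen_poll_irq() and xen_set_irq_pending() are the helpers
the patch uses.  The numbered comments show the problematic interleaving and
where the fix intervenes.

/* Sketch only -- hypothetical wrappers around the real helpers. */
static void outer_path(struct raw_spinlock *lock, int irq)
{
	xen_clear_irq_pending(irq);	/* (1) arm the kicker irq */

	if (xen_spin_trylock(lock))	/* (2) lock is still held... */
		return;

	/* --- an interrupt running nested_path() lands here --- */

	xen_poll_irq(irq);		/* (5) without the fix this can block
					   forever: the lock became free but
					   the kick was consumed at (3) */
}

static void nested_path(struct raw_spinlock *other_lock, int irq)
{
	xen_clear_irq_pending(irq);	/* (3) may consume the outer kick */

	if (xen_spin_trylock(other_lock)) {
		/*
		 * (4) the fix: we interrupted another spinner, so leave
		 * the irq pending; the outer xen_poll_irq() then returns
		 * immediately and rechecks its lock.
		 */
		xen_set_irq_pending(irq);
	}
}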
-rw-r--r--	arch/x86/xen/spinlock.c	65
-rw-r--r--	drivers/xen/events.c	25
-rw-r--r--	include/xen/events.h	2
3 files changed, 77 insertions, 15 deletions
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index 8dc4d31da67f..4884bc603aa7 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -47,25 +47,41 @@ static int xen_spin_trylock(struct raw_spinlock *lock)
 static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
 static DEFINE_PER_CPU(struct xen_spinlock *, lock_spinners);
 
-static inline void spinning_lock(struct xen_spinlock *xl)
+/*
+ * Mark a cpu as interested in a lock.  Returns the CPU's previous
+ * lock of interest, in case we got preempted by an interrupt.
+ */
+static inline struct xen_spinlock *spinning_lock(struct xen_spinlock *xl)
 {
+	struct xen_spinlock *prev;
+
+	prev = __get_cpu_var(lock_spinners);
 	__get_cpu_var(lock_spinners) = xl;
+
 	wmb();			/* set lock of interest before count */
+
 	asm(LOCK_PREFIX " incw %0"
 	    : "+m" (xl->spinners) : : "memory");
+
+	return prev;
 }
 
-static inline void unspinning_lock(struct xen_spinlock *xl)
+/*
+ * Mark a cpu as no longer interested in a lock.  Restores previous
+ * lock of interest (NULL for none).
+ */
+static inline void unspinning_lock(struct xen_spinlock *xl, struct xen_spinlock *prev)
 {
 	asm(LOCK_PREFIX " decw %0"
 	    : "+m" (xl->spinners) : : "memory");
-	wmb();			/* decrement count before clearing lock */
-	__get_cpu_var(lock_spinners) = NULL;
+	wmb();			/* decrement count before restoring lock */
+	__get_cpu_var(lock_spinners) = prev;
 }
 
 static noinline int xen_spin_lock_slow(struct raw_spinlock *lock)
 {
 	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
+	struct xen_spinlock *prev;
 	int irq = __get_cpu_var(lock_kicker_irq);
 	int ret;
 
@@ -74,23 +90,42 @@ static noinline int xen_spin_lock_slow(struct raw_spinlock *lock)
 		return 0;
 
 	/* announce we're spinning */
-	spinning_lock(xl);
+	prev = spinning_lock(xl);
 
-	/* clear pending */
-	xen_clear_irq_pending(irq);
+	do {
+		/* clear pending */
+		xen_clear_irq_pending(irq);
+
+		/* check again make sure it didn't become free while
+		   we weren't looking  */
+		ret = xen_spin_trylock(lock);
+		if (ret) {
+			/*
+			 * If we interrupted another spinlock while it
+			 * was blocking, make sure it doesn't block
+			 * without rechecking the lock.
+			 */
+			if (prev != NULL)
+				xen_set_irq_pending(irq);
+			goto out;
+		}
 
-	/* check again make sure it didn't become free while
-	   we weren't looking  */
-	ret = xen_spin_trylock(lock);
-	if (ret)
-		goto out;
+		/*
+		 * Block until irq becomes pending.  If we're
+		 * interrupted at this point (after the trylock but
+		 * before entering the block), then the nested lock
+		 * handler guarantees that the irq will be left
+		 * pending if there's any chance the lock became free;
+		 * xen_poll_irq() returns immediately if the irq is
+		 * pending.
+		 */
+		xen_poll_irq(irq);
+	} while (!xen_test_irq_pending(irq)); /* check for spurious wakeups */
 
-	/* block until irq becomes pending */
-	xen_poll_irq(irq);
 	kstat_this_cpu.irqs[irq]++;
 
 out:
-	unspinning_lock(xl);
+	unspinning_lock(xl, prev);
 	return ret;
 }
 
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index a0837036d898..b6c2b8f16bee 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -164,6 +164,12 @@ static inline void set_evtchn(int port)
 	sync_set_bit(port, &s->evtchn_pending[0]);
 }
 
+static inline int test_evtchn(int port)
+{
+	struct shared_info *s = HYPERVISOR_shared_info;
+	return sync_test_bit(port, &s->evtchn_pending[0]);
+}
+
 
 /**
  * notify_remote_via_irq - send event to remote end of event channel via irq
@@ -732,6 +738,25 @@ void xen_clear_irq_pending(int irq)
 		clear_evtchn(evtchn);
 }
 
+void xen_set_irq_pending(int irq)
+{
+	int evtchn = evtchn_from_irq(irq);
+
+	if (VALID_EVTCHN(evtchn))
+		set_evtchn(evtchn);
+}
+
+bool xen_test_irq_pending(int irq)
+{
+	int evtchn = evtchn_from_irq(irq);
+	bool ret = false;
+
+	if (VALID_EVTCHN(evtchn))
+		ret = test_evtchn(evtchn);
+
+	return ret;
+}
+
 /* Poll waiting for an irq to become pending.  In the usual case, the
    irq will be disabled so it won't deliver an interrupt. */
 void xen_poll_irq(int irq)
diff --git a/include/xen/events.h b/include/xen/events.h
index 4680ff3fbc91..0d5f1adc0363 100644
--- a/include/xen/events.h
+++ b/include/xen/events.h
@@ -46,6 +46,8 @@ extern void xen_irq_resume(void);
 
 /* Clear an irq's pending state, in preparation for polling on it */
 void xen_clear_irq_pending(int irq);
+void xen_set_irq_pending(int irq);
+bool xen_test_irq_pending(int irq);
 
 /* Poll waiting for an irq to become pending.  In the usual case, the
    irq will be disabled so it won't deliver an interrupt. */