Diffstat (limited to 'arch/x86/xen/spinlock.c')
-rw-r--r-- | arch/x86/xen/spinlock.c | 65
1 file changed, 50 insertions, 15 deletions
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index 8dc4d31da67f..4884bc603aa7 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -47,25 +47,41 @@ static int xen_spin_trylock(struct raw_spinlock *lock)
 static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
 static DEFINE_PER_CPU(struct xen_spinlock *, lock_spinners);
 
-static inline void spinning_lock(struct xen_spinlock *xl)
+/*
+ * Mark a cpu as interested in a lock.  Returns the CPU's previous
+ * lock of interest, in case we got preempted by an interrupt.
+ */
+static inline struct xen_spinlock *spinning_lock(struct xen_spinlock *xl)
 {
+        struct xen_spinlock *prev;
+
+        prev = __get_cpu_var(lock_spinners);
         __get_cpu_var(lock_spinners) = xl;
+
         wmb();                  /* set lock of interest before count */
+
         asm(LOCK_PREFIX " incw %0"
             : "+m" (xl->spinners) : : "memory");
+
+        return prev;
 }
 
-static inline void unspinning_lock(struct xen_spinlock *xl)
+/*
+ * Mark a cpu as no longer interested in a lock.  Restores previous
+ * lock of interest (NULL for none).
+ */
+static inline void unspinning_lock(struct xen_spinlock *xl, struct xen_spinlock *prev)
 {
         asm(LOCK_PREFIX " decw %0"
             : "+m" (xl->spinners) : : "memory");
-        wmb();                  /* decrement count before clearing lock */
-        __get_cpu_var(lock_spinners) = NULL;
+        wmb();                  /* decrement count before restoring lock */
+        __get_cpu_var(lock_spinners) = prev;
 }
 
 static noinline int xen_spin_lock_slow(struct raw_spinlock *lock)
 {
         struct xen_spinlock *xl = (struct xen_spinlock *)lock;
+        struct xen_spinlock *prev;
         int irq = __get_cpu_var(lock_kicker_irq);
         int ret;
 
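The hunk above makes spinning_lock() return the per-CPU lock the caller was already spinning on, and unspinning_lock() restore it, so a slow-path acquisition nested inside an interrupt handler no longer clobbers the outer CPU's "lock of interest". As a rough user-space illustration of that save/restore pattern (not kernel code: fake_lock, current_spinner, mark_spinning and unmark_spinning are invented names, and a __thread variable stands in for the per-CPU lock_spinners slot):

/*
 * User-space sketch of the save/restore pattern above.  A nested
 * acquisition (e.g. from an interrupt handler) saves the outer
 * "lock of interest" and restores it when it is done.  All names
 * are invented for illustration; none are kernel symbols.
 */
#include <stdio.h>

struct fake_lock { int dummy; };

/* stands in for the per-cpu lock_spinners variable */
static __thread struct fake_lock *current_spinner;

static struct fake_lock *mark_spinning(struct fake_lock *xl)
{
        struct fake_lock *prev = current_spinner;   /* save outer lock */

        current_spinner = xl;                       /* announce the new one */
        return prev;
}

static void unmark_spinning(struct fake_lock *prev)
{
        current_spinner = prev;      /* restore outer lock (NULL for none) */
}

int main(void)
{
        struct fake_lock outer, inner;

        struct fake_lock *p1 = mark_spinning(&outer);  /* normal acquisition */
        struct fake_lock *p2 = mark_spinning(&inner);  /* "interrupt" nests   */

        unmark_spinning(p2);    /* back to spinning on &outer */
        printf("restored outer: %d\n", current_spinner == &outer);
        unmark_spinning(p1);    /* back to no lock of interest */
        return 0;
}

The nested call returns the outer pointer, and restoring it on exit leaves the interrupted spin exactly as it was before the interrupt arrived.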
@@ -74,23 +90,42 @@ static noinline int xen_spin_lock_slow(struct raw_spinlock *lock)
                 return 0;
 
         /* announce we're spinning */
-        spinning_lock(xl);
+        prev = spinning_lock(xl);
 
-        /* clear pending */
-        xen_clear_irq_pending(irq);
+        do {
+                /* clear pending */
+                xen_clear_irq_pending(irq);
+
+                /* check again make sure it didn't become free while
+                   we weren't looking */
+                ret = xen_spin_trylock(lock);
+                if (ret) {
+                        /*
+                         * If we interrupted another spinlock while it
+                         * was blocking, make sure it doesn't block
+                         * without rechecking the lock.
+                         */
+                        if (prev != NULL)
+                                xen_set_irq_pending(irq);
+                        goto out;
+                }
 
-        /* check again make sure it didn't become free while
-           we weren't looking */
-        ret = xen_spin_trylock(lock);
-        if (ret)
-                goto out;
+                /*
+                 * Block until irq becomes pending.  If we're
+                 * interrupted at this point (after the trylock but
+                 * before entering the block), then the nested lock
+                 * handler guarantees that the irq will be left
+                 * pending if there's any chance the lock became free;
+                 * xen_poll_irq() returns immediately if the irq is
+                 * pending.
+                 */
+                xen_poll_irq(irq);
+        } while (!xen_test_irq_pending(irq)); /* check for spurious wakeups */
 
-        /* block until irq becomes pending */
-        xen_poll_irq(irq);
         kstat_this_cpu.irqs[irq]++;
 
 out:
-        unspinning_lock(xl);
+        unspinning_lock(xl, prev);
         return ret;
 }
 
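The second hunk turns the clear-pending / trylock / poll sequence into a do/while loop and, when a nested acquisition succeeds, re-arms the pending flag so the interrupted outer spinner rechecks the lock instead of blocking on a kick it may have consumed. The toy model below (ordinary single-threaded user-space C; the names and the poll_block() behaviour are invented stand-ins for the Xen event-channel calls, not their real semantics) only illustrates that control flow: clear the latch, recheck the lock, block, and loop again on a spurious wakeup.

/*
 * Toy, single-threaded model of the wait loop added above.  The real
 * xen_*_irq_pending()/xen_poll_irq() event-channel calls are replaced
 * by a plain boolean latch; only the control flow is the point.
 * Not real Xen code.
 */
#include <stdbool.h>
#include <stdio.h>

static bool irq_pending;        /* models the event-channel pending bit */
static bool lock_free;          /* models the lock holder releasing it  */

static void clear_pending(void) { irq_pending = false; }
static bool test_pending(void)  { return irq_pending; }
static bool try_lock(void)      { return lock_free; }

static void poll_block(void)
{
        /* pretend the holder released the lock and kicked us while we
         * were blocked; a real poll returns immediately if the pending
         * bit is already set */
        if (!irq_pending) {
                lock_free = true;
                irq_pending = true;
        }
}

int main(void)
{
        bool got = false;

        do {
                clear_pending();            /* clear pending            */
                if (try_lock()) {           /* freed while we set up?   */
                        got = true;
                        break;
                }
                poll_block();               /* block until kicked       */
        } while (!test_pending());          /* spurious wakeup -> retry */

        printf("acquired: %d\n", got);
        return 0;
}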