Diffstat (limited to 'arch/x86/xen/spinlock.c')
 -rw-r--r--  arch/x86/xen/spinlock.c | 250
 1 file changed, 1 insertion(+), 249 deletions(-)
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index f42e78de1e10..3d6e0064cbfc 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -21,8 +21,6 @@ static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
 static DEFINE_PER_CPU(char *, irq_name);
 static bool xen_pvspin = true;
 
-#ifdef CONFIG_QUEUED_SPINLOCKS
-
 #include <asm/qspinlock.h>
 
 static void xen_qlock_kick(int cpu)
@@ -71,207 +69,6 @@ static void xen_qlock_wait(u8 *byte, u8 val)
 	xen_poll_irq(irq);
 }
 
-#else /* CONFIG_QUEUED_SPINLOCKS */
-
-enum xen_contention_stat {
-	TAKEN_SLOW,
-	TAKEN_SLOW_PICKUP,
-	TAKEN_SLOW_SPURIOUS,
-	RELEASED_SLOW,
-	RELEASED_SLOW_KICKED,
-	NR_CONTENTION_STATS
-};
-
-
-#ifdef CONFIG_XEN_DEBUG_FS
-#define HISTO_BUCKETS 30
-static struct xen_spinlock_stats
-{
-	u32 contention_stats[NR_CONTENTION_STATS];
-	u32 histo_spin_blocked[HISTO_BUCKETS+1];
-	u64 time_blocked;
-} spinlock_stats;
-
-static u8 zero_stats;
-
-static inline void check_zero(void)
-{
-	u8 ret;
-	u8 old = READ_ONCE(zero_stats);
-	if (unlikely(old)) {
-		ret = cmpxchg(&zero_stats, old, 0);
-		/* This ensures only one fellow resets the stat */
-		if (ret == old)
-			memset(&spinlock_stats, 0, sizeof(spinlock_stats));
-	}
-}
-
-static inline void add_stats(enum xen_contention_stat var, u32 val)
-{
-	check_zero();
-	spinlock_stats.contention_stats[var] += val;
-}
-
-static inline u64 spin_time_start(void)
-{
-	return xen_clocksource_read();
-}
-
-static void __spin_time_accum(u64 delta, u32 *array)
-{
-	unsigned index = ilog2(delta);
-
-	check_zero();
-
-	if (index < HISTO_BUCKETS)
-		array[index]++;
-	else
-		array[HISTO_BUCKETS]++;
-}
-
-static inline void spin_time_accum_blocked(u64 start)
-{
-	u32 delta = xen_clocksource_read() - start;
-
-	__spin_time_accum(delta, spinlock_stats.histo_spin_blocked);
-	spinlock_stats.time_blocked += delta;
-}
-#else /* !CONFIG_XEN_DEBUG_FS */
-static inline void add_stats(enum xen_contention_stat var, u32 val)
-{
-}
-
-static inline u64 spin_time_start(void)
-{
-	return 0;
-}
-
-static inline void spin_time_accum_blocked(u64 start)
-{
-}
-#endif /* CONFIG_XEN_DEBUG_FS */
-
-struct xen_lock_waiting {
-	struct arch_spinlock *lock;
-	__ticket_t want;
-};
-
-static DEFINE_PER_CPU(struct xen_lock_waiting, lock_waiting);
-static cpumask_t waiting_cpus;
-
-__visible void xen_lock_spinning(struct arch_spinlock *lock, __ticket_t want)
-{
-	int irq = __this_cpu_read(lock_kicker_irq);
-	struct xen_lock_waiting *w = this_cpu_ptr(&lock_waiting);
-	int cpu = smp_processor_id();
-	u64 start;
-	__ticket_t head;
-	unsigned long flags;
-
-	/* If kicker interrupts not initialized yet, just spin */
-	if (irq == -1)
-		return;
-
-	start = spin_time_start();
-
-	/*
-	 * Make sure an interrupt handler can't upset things in a
-	 * partially setup state.
-	 */
-	local_irq_save(flags);
-	/*
-	 * We don't really care if we're overwriting some other
-	 * (lock,want) pair, as that would mean that we're currently
-	 * in an interrupt context, and the outer context had
-	 * interrupts enabled. That has already kicked the VCPU out
-	 * of xen_poll_irq(), so it will just return spuriously and
-	 * retry with newly setup (lock,want).
-	 *
-	 * The ordering protocol on this is that the "lock" pointer
-	 * may only be set non-NULL if the "want" ticket is correct.
-	 * If we're updating "want", we must first clear "lock".
-	 */
-	w->lock = NULL;
-	smp_wmb();
-	w->want = want;
-	smp_wmb();
-	w->lock = lock;
-
-	/* This uses set_bit, which atomic and therefore a barrier */
-	cpumask_set_cpu(cpu, &waiting_cpus);
-	add_stats(TAKEN_SLOW, 1);
-
-	/* clear pending */
-	xen_clear_irq_pending(irq);
-
-	/* Only check lock once pending cleared */
-	barrier();
-
-	/*
-	 * Mark entry to slowpath before doing the pickup test to make
-	 * sure we don't deadlock with an unlocker.
-	 */
-	__ticket_enter_slowpath(lock);
-
-	/* make sure enter_slowpath, which is atomic does not cross the read */
-	smp_mb__after_atomic();
-
-	/*
-	 * check again make sure it didn't become free while
-	 * we weren't looking
-	 */
-	head = READ_ONCE(lock->tickets.head);
-	if (__tickets_equal(head, want)) {
-		add_stats(TAKEN_SLOW_PICKUP, 1);
-		goto out;
-	}
-
-	/* Allow interrupts while blocked */
-	local_irq_restore(flags);
-
-	/*
-	 * If an interrupt happens here, it will leave the wakeup irq
-	 * pending, which will cause xen_poll_irq() to return
-	 * immediately.
-	 */
-
-	/* Block until irq becomes pending (or perhaps a spurious wakeup) */
-	xen_poll_irq(irq);
-	add_stats(TAKEN_SLOW_SPURIOUS, !xen_test_irq_pending(irq));
-
-	local_irq_save(flags);
-
-	kstat_incr_irq_this_cpu(irq);
-out:
-	cpumask_clear_cpu(cpu, &waiting_cpus);
-	w->lock = NULL;
-
-	local_irq_restore(flags);
-
-	spin_time_accum_blocked(start);
-}
-PV_CALLEE_SAVE_REGS_THUNK(xen_lock_spinning);
-
-static void xen_unlock_kick(struct arch_spinlock *lock, __ticket_t next)
-{
-	int cpu;
-
-	add_stats(RELEASED_SLOW, 1);
-
-	for_each_cpu(cpu, &waiting_cpus) {
-		const struct xen_lock_waiting *w = &per_cpu(lock_waiting, cpu);
-
-		/* Make sure we read lock before want */
-		if (READ_ONCE(w->lock) == lock &&
-		    READ_ONCE(w->want) == next) {
-			add_stats(RELEASED_SLOW_KICKED, 1);
-			xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR);
-			break;
-		}
-	}
-}
-#endif /* CONFIG_QUEUED_SPINLOCKS */
-
 static irqreturn_t dummy_handler(int irq, void *dev_id)
 {
 	BUG();
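
The bulk of the patch is the removed ticket-lock slow path above, and its heart is the ordering comment in xen_lock_spinning(): the "lock" pointer may only be non-NULL while "want" is correct, so the waiter clears lock, writes want, then republishes lock, with smp_wmb() between each step, while the unlocker in xen_unlock_kick() reads lock before want. Below is a minimal standalone model of that protocol, a sketch only: C11 atomics stand in for smp_wmb()/READ_ONCE(), and every name in it is illustrative, not kernel API.

/*
 * pv_ticket_protocol.c: userspace model of the (lock, want) publication
 * protocol from the removed xen_lock_spinning()/xen_unlock_kick() pair.
 * Build: cc -std=c11 -pthread pv_ticket_protocol.c
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct fake_lock { int unused; };

static struct fake_lock the_lock;

/* One waiter slot, mirroring the per-CPU struct xen_lock_waiting. */
static _Atomic(struct fake_lock *) w_lock;
static atomic_uint w_want;
static atomic_int kicked;

/* Waiter: publish (lock, want) so a torn pair can never be observed. */
static void *waiter(void *arg)
{
	(void)arg;

	/* Clear lock first, then want, then republish lock; the release
	 * stores play the role of the kernel's smp_wmb(). */
	atomic_store_explicit(&w_lock, NULL, memory_order_relaxed);
	atomic_store_explicit(&w_want, 42, memory_order_release);
	atomic_store_explicit(&w_lock, &the_lock, memory_order_release);

	/* A real waiter would now block in xen_poll_irq(). */
	while (!atomic_load_explicit(&kicked, memory_order_acquire))
		;
	return NULL;
}

/* Unlocker: read lock before want, as xen_unlock_kick() does. */
static void *kicker(void *arg)
{
	(void)arg;

	for (;;) {
		struct fake_lock *l =
			atomic_load_explicit(&w_lock, memory_order_acquire);

		if (l == &the_lock &&
		    atomic_load_explicit(&w_want,
					 memory_order_relaxed) == 42) {
			atomic_store_explicit(&kicked, 1,
					      memory_order_release);
			return NULL;
		}
	}
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, waiter, NULL);
	pthread_create(&b, NULL, kicker, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	puts("kick delivered only for a matching (lock, want) pair");
	return 0;
}

The model makes the removed comment's argument concrete: because the waiter publishes lock last and the kicker reads it first, a half-updated slot can only ever be seen as "no match"; it can never produce a kick for the wrong ticket.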
@@ -334,16 +131,12 @@ void __init xen_init_spinlocks(void)
 		return;
 	}
 	printk(KERN_DEBUG "xen: PV spinlocks enabled\n");
-#ifdef CONFIG_QUEUED_SPINLOCKS
+
 	__pv_init_lock_hash();
 	pv_lock_ops.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
 	pv_lock_ops.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
 	pv_lock_ops.wait = xen_qlock_wait;
 	pv_lock_ops.kick = xen_qlock_kick;
-#else
-	pv_lock_ops.lock_spinning = PV_CALLEE_SAVE(xen_lock_spinning);
-	pv_lock_ops.unlock_kick = xen_unlock_kick;
-#endif
 }
 
 /*
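
What the patch keeps is the queued-spinlock paravirt interface wired up just above: __pv_queued_spin_lock_slowpath() parks a waiting vCPU through pv_lock_ops.wait(ptr, val), which must block only while *ptr still reads val, and the lock holder wakes it through pv_lock_ops.kick(cpu). The sketch below is a userspace model of that wait/kick contract, under the assumption that xen_qlock_wait() follows the same clear-pending-then-recheck shape the removed ticket path used; all names here are illustrative, not the Xen implementation.

/*
 * qlock_wait_kick.c: userspace model of the pv_lock_ops.wait()/kick()
 * contract used by the queued-spinlock slow path.
 * Build: cc -std=c11 -pthread qlock_wait_kick.c
 */
#include <pthread.h>
#include <sched.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_uchar lock_byte = 1;	/* 1 = locked, 0 = free */
static atomic_int wake_pending;		/* stands in for the kicker event */

/*
 * wait(ptr, val): may return spuriously, but must never sleep once *ptr
 * has changed away from val. That is the whole contract.
 */
static void model_wait(atomic_uchar *ptr, unsigned char val)
{
	/* Clear any stale wakeup first (cf. xen_clear_irq_pending())... */
	atomic_store(&wake_pending, 0);

	/* ...then recheck the lock byte, so a kick racing with the clear
	 * is never lost: if we erased one, the byte has already changed. */
	if (atomic_load(ptr) != val)
		return;

	/* "Poll the event channel": block until kicked. */
	while (!atomic_load(&wake_pending))
		sched_yield();
}

static void model_kick(void)
{
	atomic_store(&wake_pending, 1);
}

static void *waiter(void *arg)
{
	(void)arg;

	while (atomic_load(&lock_byte) == 1)
		model_wait(&lock_byte, 1);
	puts("waiter saw the lock byte change");
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, waiter, NULL);
	atomic_store(&lock_byte, 0);	/* release the lock... */
	model_kick();			/* ...then kick the parked waiter */
	pthread_join(t, NULL);
	return 0;
}

Erasing a stale kick in model_wait() is safe precisely because of the recheck: the kicker changes the byte before raising the event, so if the clear swallowed a kick, the recheck is guaranteed to observe the new value and return instead of blocking.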
@@ -372,44 +165,3 @@ static __init int xen_parse_nopvspin(char *arg)
 }
 early_param("xen_nopvspin", xen_parse_nopvspin);
 
-#if defined(CONFIG_XEN_DEBUG_FS) && !defined(CONFIG_QUEUED_SPINLOCKS)
-
-static struct dentry *d_spin_debug;
-
-static int __init xen_spinlock_debugfs(void)
-{
-	struct dentry *d_xen = xen_init_debugfs();
-
-	if (d_xen == NULL)
-		return -ENOMEM;
-
-	if (!xen_pvspin)
-		return 0;
-
-	d_spin_debug = debugfs_create_dir("spinlocks", d_xen);
-
-	debugfs_create_u8("zero_stats", 0644, d_spin_debug, &zero_stats);
-
-	debugfs_create_u32("taken_slow", 0444, d_spin_debug,
-			   &spinlock_stats.contention_stats[TAKEN_SLOW]);
-	debugfs_create_u32("taken_slow_pickup", 0444, d_spin_debug,
-			   &spinlock_stats.contention_stats[TAKEN_SLOW_PICKUP]);
-	debugfs_create_u32("taken_slow_spurious", 0444, d_spin_debug,
-			   &spinlock_stats.contention_stats[TAKEN_SLOW_SPURIOUS]);
-
-	debugfs_create_u32("released_slow", 0444, d_spin_debug,
-			   &spinlock_stats.contention_stats[RELEASED_SLOW]);
-	debugfs_create_u32("released_slow_kicked", 0444, d_spin_debug,
-			   &spinlock_stats.contention_stats[RELEASED_SLOW_KICKED]);
-
-	debugfs_create_u64("time_blocked", 0444, d_spin_debug,
-			   &spinlock_stats.time_blocked);
-
-	debugfs_create_u32_array("histo_blocked", 0444, d_spin_debug,
-				 spinlock_stats.histo_spin_blocked, HISTO_BUCKETS + 1);
-
-	return 0;
-}
-fs_initcall(xen_spinlock_debugfs);
-
-#endif /* CONFIG_XEN_DEBUG_FS */
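
The final hunk drops the ticket-lock debugfs instrumentation wholesale; every counter it exported belongs to the slow path removed above. The one piece worth spelling out is how the histo_blocked histogram was built: __spin_time_accum() indexes the bucket array by the log2 of each blocked interval, and anything at or beyond 2^HISTO_BUCKETS lands in the final overflow bucket. A standalone rendering of that bucketing follows; ilog2_u64() is a hypothetical userspace stand-in for the kernel's ilog2(), and the units are whatever xen_clocksource_read() counts in.

/*
 * histo_buckets.c: the log2 bucketing used by the removed
 * __spin_time_accum()/histo_spin_blocked debugfs histogram.
 * Build: cc histo_buckets.c
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define HISTO_BUCKETS 30

static uint32_t histo[HISTO_BUCKETS + 1];

/* Userspace stand-in for the kernel's ilog2(): floor(log2(v)), 0 for v <= 1. */
static unsigned int ilog2_u64(uint64_t v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

/* Mirror of __spin_time_accum(): bucket i counts intervals whose
 * floor(log2) is i; bucket HISTO_BUCKETS collects all overflows. */
static void time_accum(uint64_t delta)
{
	unsigned int index = ilog2_u64(delta);

	if (index < HISTO_BUCKETS)
		histo[index]++;
	else
		histo[HISTO_BUCKETS]++;
}

int main(void)
{
	const uint64_t samples[] = { 100, 1500, 1500, 80000, 1ULL << 40 };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		time_accum(samples[i]);

	for (i = 0; i <= HISTO_BUCKETS; i++)
		if (histo[i])
			printf("bucket %u: %" PRIu32 " sample(s)\n",
			       i, histo[i]);
	return 0;
}

On the kernel side, debugfs_create_u32_array() then exposed the whole HISTO_BUCKETS + 1 array as a single debugfs file, which is all the "histo_blocked" entry ever was.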