Diffstat (limited to 'arch/x86/xen/spinlock.c')
-rw-r--r--    arch/x86/xen/spinlock.c    428
1 file changed, 428 insertions, 0 deletions
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
new file mode 100644
index 000000000000..dd71e3a021cd
--- /dev/null
+++ b/arch/x86/xen/spinlock.c
@@ -0,0 +1,428 @@
/*
 * Split spinlock implementation out into its own file, so it can be
 * compiled in a FTRACE-compatible way.
 */
#include <linux/kernel_stat.h>
#include <linux/spinlock.h>
#include <linux/debugfs.h>
#include <linux/log2.h>

#include <asm/paravirt.h>

#include <xen/interface/xen.h>
#include <xen/events.h>

#include "xen-ops.h"
#include "debugfs.h"

#ifdef CONFIG_XEN_DEBUG_FS
static struct xen_spinlock_stats
{
        u64 taken;
        u32 taken_slow;
        u32 taken_slow_nested;
        u32 taken_slow_pickup;
        u32 taken_slow_spurious;
        u32 taken_slow_irqenable;

        u64 released;
        u32 released_slow;
        u32 released_slow_kicked;

#define HISTO_BUCKETS 30
        u32 histo_spin_total[HISTO_BUCKETS+1];
        u32 histo_spin_spinning[HISTO_BUCKETS+1];
        u32 histo_spin_blocked[HISTO_BUCKETS+1];

        u64 time_total;
        u64 time_spinning;
        u64 time_blocked;
} spinlock_stats;

static u8 zero_stats;

static unsigned lock_timeout = 1 << 10;
#define TIMEOUT lock_timeout

static inline void check_zero(void)
{
        if (unlikely(zero_stats)) {
                memset(&spinlock_stats, 0, sizeof(spinlock_stats));
                zero_stats = 0;
        }
}

#define ADD_STATS(elem, val)                    \
        do { check_zero(); spinlock_stats.elem += (val); } while(0)

static inline u64 spin_time_start(void)
{
        return xen_clocksource_read();
}

static void __spin_time_accum(u64 delta, u32 *array)
{
        unsigned index = ilog2(delta);

        check_zero();

        if (index < HISTO_BUCKETS)
                array[index]++;
        else
                array[HISTO_BUCKETS]++;
}
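
/*
 * The histogram buckets are power-of-two ranges of the clocksource
 * delta: a delta d goes into bucket ilog2(d).  For example, a delta of
 * 5000 has ilog2(5000) == 12 and so lands in bucket 12, while any delta
 * of 2^30 or more is lumped into the final overflow bucket.
 */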

static inline void spin_time_accum_spinning(u64 start)
{
        u32 delta = xen_clocksource_read() - start;

        __spin_time_accum(delta, spinlock_stats.histo_spin_spinning);
        spinlock_stats.time_spinning += delta;
}

static inline void spin_time_accum_total(u64 start)
{
        u32 delta = xen_clocksource_read() - start;

        __spin_time_accum(delta, spinlock_stats.histo_spin_total);
        spinlock_stats.time_total += delta;
}

static inline void spin_time_accum_blocked(u64 start)
{
        u32 delta = xen_clocksource_read() - start;

        __spin_time_accum(delta, spinlock_stats.histo_spin_blocked);
        spinlock_stats.time_blocked += delta;
}
#else  /* !CONFIG_XEN_DEBUG_FS */
#define TIMEOUT                 (1 << 10)
#define ADD_STATS(elem, val)    do { (void)(val); } while(0)

static inline u64 spin_time_start(void)
{
        return 0;
}

static inline void spin_time_accum_total(u64 start)
{
}
static inline void spin_time_accum_spinning(u64 start)
{
}
static inline void spin_time_accum_blocked(u64 start)
{
}
#endif  /* CONFIG_XEN_DEBUG_FS */

struct xen_spinlock {
        unsigned char lock;             /* 0 -> free; 1 -> locked */
        unsigned short spinners;        /* count of waiting cpus */
};

static int xen_spin_is_locked(struct raw_spinlock *lock)
{
        struct xen_spinlock *xl = (struct xen_spinlock *)lock;

        return xl->lock != 0;
}

static int xen_spin_is_contended(struct raw_spinlock *lock)
{
        struct xen_spinlock *xl = (struct xen_spinlock *)lock;

        /* Not strictly true; this is only the count of contended
           lock-takers entering the slow path. */
        return xl->spinners != 0;
}

static int xen_spin_trylock(struct raw_spinlock *lock)
{
        struct xen_spinlock *xl = (struct xen_spinlock *)lock;
        u8 old = 1;

        asm("xchgb %b0,%1"
            : "+q" (old), "+m" (xl->lock) : : "memory");

        return old == 0;
}
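
/*
 * A rough C rendering of the xchgb trylock above (sketch only; the
 * helper name and the use of GCC's __atomic builtins are illustrative
 * and not part of this file): atomically swap 1 into the lock byte and
 * treat the lock as acquired iff the previous value was 0.
 */
#if 0
static int xen_spin_trylock_sketch(struct xen_spinlock *xl)
{
        /* atomic exchange: store 1, fetch whatever was there before */
        u8 old = __atomic_exchange_n(&xl->lock, 1, __ATOMIC_ACQUIRE);

        return old == 0;        /* acquired only if it was previously free */
}
#endif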

static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
static DEFINE_PER_CPU(struct xen_spinlock *, lock_spinners);

/*
 * Mark a cpu as interested in a lock.  Returns the CPU's previous
 * lock of interest, in case we got preempted by an interrupt.
 */
static inline struct xen_spinlock *spinning_lock(struct xen_spinlock *xl)
{
        struct xen_spinlock *prev;

        prev = __get_cpu_var(lock_spinners);
        __get_cpu_var(lock_spinners) = xl;

        wmb();                  /* set lock of interest before count */

        asm(LOCK_PREFIX " incw %0"
            : "+m" (xl->spinners) : : "memory");

        return prev;
}

/*
 * Mark a cpu as no longer interested in a lock.  Restores previous
 * lock of interest (NULL for none).
 */
static inline void unspinning_lock(struct xen_spinlock *xl, struct xen_spinlock *prev)
{
        asm(LOCK_PREFIX " decw %0"
            : "+m" (xl->spinners) : : "memory");
        wmb();                  /* decrement count before restoring lock */
        __get_cpu_var(lock_spinners) = prev;
}
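
/*
 * Illustration (hypothetical locks A and B, not named anywhere in this
 * file): if an interrupt handler ends up spinning on B while the
 * interrupted context was already advertising A, the per-cpu "lock of
 * interest" simply nests:
 *
 *      prev = spinning_lock(B);        // per-cpu slot: A -> B, B->spinners++
 *      ... poll/block waiting for B ...
 *      unspinning_lock(B, prev);       // B->spinners--, per-cpu slot: B -> A
 */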

static noinline int xen_spin_lock_slow(struct raw_spinlock *lock, bool irq_enable)
{
        struct xen_spinlock *xl = (struct xen_spinlock *)lock;
        struct xen_spinlock *prev;
        int irq = __get_cpu_var(lock_kicker_irq);
        int ret;
        unsigned long flags;
        u64 start;

        /* If kicker interrupts not initialized yet, just spin */
        if (irq == -1)
                return 0;

        start = spin_time_start();

        /* announce we're spinning */
        prev = spinning_lock(xl);

        flags = __raw_local_save_flags();
        if (irq_enable) {
                ADD_STATS(taken_slow_irqenable, 1);
                raw_local_irq_enable();
        }

        ADD_STATS(taken_slow, 1);
        ADD_STATS(taken_slow_nested, prev != NULL);

        do {
                /* clear pending */
                xen_clear_irq_pending(irq);

                /* check again make sure it didn't become free while
                   we weren't looking */
                ret = xen_spin_trylock(lock);
                if (ret) {
                        ADD_STATS(taken_slow_pickup, 1);

                        /*
                         * If we interrupted another spinlock while it
                         * was blocking, make sure it doesn't block
                         * without rechecking the lock.
                         */
                        if (prev != NULL)
                                xen_set_irq_pending(irq);
                        goto out;
                }

                /*
                 * Block until irq becomes pending.  If we're
                 * interrupted at this point (after the trylock but
                 * before entering the block), then the nested lock
                 * handler guarantees that the irq will be left
                 * pending if there's any chance the lock became free;
                 * xen_poll_irq() returns immediately if the irq is
                 * pending.
                 */
                xen_poll_irq(irq);
                ADD_STATS(taken_slow_spurious, !xen_test_irq_pending(irq));
        } while (!xen_test_irq_pending(irq)); /* check for spurious wakeups */

        kstat_this_cpu.irqs[irq]++;

out:
        raw_local_irq_restore(flags);
        unspinning_lock(xl, prev);
        spin_time_accum_blocked(start);

        return ret;
}

static inline void __xen_spin_lock(struct raw_spinlock *lock, bool irq_enable)
{
        struct xen_spinlock *xl = (struct xen_spinlock *)lock;
        unsigned timeout;
        u8 oldval;
        u64 start_spin;

        ADD_STATS(taken, 1);

        start_spin = spin_time_start();

        do {
                u64 start_spin_fast = spin_time_start();

                timeout = TIMEOUT;

                asm("1: xchgb %1,%0\n"
                    "   testb %1,%1\n"
                    "   jz 3f\n"
                    "2: rep;nop\n"
                    "   cmpb $0,%0\n"
                    "   je 1b\n"
                    "   dec %2\n"
                    "   jnz 2b\n"
                    "3:\n"
                    : "+m" (xl->lock), "=q" (oldval), "+r" (timeout)
                    : "1" (1)
                    : "memory");

                spin_time_accum_spinning(start_spin_fast);

        } while (unlikely(oldval != 0 &&
                          (TIMEOUT == ~0 || !xen_spin_lock_slow(lock, irq_enable))));

        spin_time_accum_total(start_spin);
}
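
/*
 * Rough C equivalent of the inline-asm spin loop above (sketch only;
 * the helper name and GCC's __atomic builtins are illustrative, not
 * part of this file): exchange 1 into the lock byte, and if it was
 * already held, pause-spin until it looks free or the timeout runs out,
 * then report the last exchanged value so the caller can decide whether
 * to fall into the slow path.
 */
#if 0
static u8 xen_spin_fast_path_sketch(struct xen_spinlock *xl, unsigned timeout)
{
        u8 oldval;

        do {
                oldval = __atomic_exchange_n(&xl->lock, 1, __ATOMIC_ACQUIRE);
                if (oldval == 0)
                        break;                          /* got the lock */
                while (xl->lock != 0 && --timeout)
                        cpu_relax();                    /* rep;nop */
        } while (timeout);

        return oldval;                                  /* 0 iff acquired */
}
#endif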

static void xen_spin_lock(struct raw_spinlock *lock)
{
        __xen_spin_lock(lock, false);
}

static void xen_spin_lock_flags(struct raw_spinlock *lock, unsigned long flags)
{
        __xen_spin_lock(lock, !raw_irqs_disabled_flags(flags));
}

static noinline void xen_spin_unlock_slow(struct xen_spinlock *xl)
{
        int cpu;

        ADD_STATS(released_slow, 1);

        for_each_online_cpu(cpu) {
                /* XXX should mix up next cpu selection */
                if (per_cpu(lock_spinners, cpu) == xl) {
                        ADD_STATS(released_slow_kicked, 1);
                        xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR);
                        break;
                }
        }
}

static void xen_spin_unlock(struct raw_spinlock *lock)
{
        struct xen_spinlock *xl = (struct xen_spinlock *)lock;

        ADD_STATS(released, 1);

        smp_wmb();              /* make sure no writes get moved after unlock */
        xl->lock = 0;           /* release lock */

        /* make sure unlock happens before kick */
        barrier();

        if (unlikely(xl->spinners))
                xen_spin_unlock_slow(xl);
}

static irqreturn_t dummy_handler(int irq, void *dev_id)
{
        BUG();
        return IRQ_HANDLED;
}

void __cpuinit xen_init_lock_cpu(int cpu)
{
        int irq;
        const char *name;

        name = kasprintf(GFP_KERNEL, "spinlock%d", cpu);
        irq = bind_ipi_to_irqhandler(XEN_SPIN_UNLOCK_VECTOR,
                                     cpu,
                                     dummy_handler,
                                     IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
                                     name,
                                     NULL);

        if (irq >= 0) {
                disable_irq(irq); /* make sure it's never delivered */
                per_cpu(lock_kicker_irq, cpu) = irq;
        }

        printk("cpu %d spinlock event irq %d\n", cpu, irq);
}

void xen_uninit_lock_cpu(int cpu)
{
        unbind_from_irqhandler(per_cpu(lock_kicker_irq, cpu), NULL);
}

void __init xen_init_spinlocks(void)
{
        pv_lock_ops.spin_is_locked = xen_spin_is_locked;
        pv_lock_ops.spin_is_contended = xen_spin_is_contended;
        pv_lock_ops.spin_lock = xen_spin_lock;
        pv_lock_ops.spin_lock_flags = xen_spin_lock_flags;
        pv_lock_ops.spin_trylock = xen_spin_trylock;
        pv_lock_ops.spin_unlock = xen_spin_unlock;
}
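
/*
 * Once these hooks are installed, the generic spinlock entry points
 * dispatch through pv_lock_ops instead of the native implementation,
 * conceptually like the sketch below (illustrative only; the real
 * kernel goes through the paravirt-ops call machinery rather than a
 * direct structure-member call):
 */
#if 0
static inline void raw_spin_lock_dispatch_sketch(struct raw_spinlock *lock)
{
        pv_lock_ops.spin_lock(lock);    /* -> xen_spin_lock() on Xen */
}
#endif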

#ifdef CONFIG_XEN_DEBUG_FS

static struct dentry *d_spin_debug;

static int __init xen_spinlock_debugfs(void)
{
        struct dentry *d_xen = xen_init_debugfs();

        if (d_xen == NULL)
                return -ENOMEM;

        d_spin_debug = debugfs_create_dir("spinlocks", d_xen);

        debugfs_create_u8("zero_stats", 0644, d_spin_debug, &zero_stats);

        debugfs_create_u32("timeout", 0644, d_spin_debug, &lock_timeout);

        debugfs_create_u64("taken", 0444, d_spin_debug, &spinlock_stats.taken);
        debugfs_create_u32("taken_slow", 0444, d_spin_debug,
                           &spinlock_stats.taken_slow);
        debugfs_create_u32("taken_slow_nested", 0444, d_spin_debug,
                           &spinlock_stats.taken_slow_nested);
        debugfs_create_u32("taken_slow_pickup", 0444, d_spin_debug,
                           &spinlock_stats.taken_slow_pickup);
        debugfs_create_u32("taken_slow_spurious", 0444, d_spin_debug,
                           &spinlock_stats.taken_slow_spurious);
        debugfs_create_u32("taken_slow_irqenable", 0444, d_spin_debug,
                           &spinlock_stats.taken_slow_irqenable);

        debugfs_create_u64("released", 0444, d_spin_debug, &spinlock_stats.released);
        debugfs_create_u32("released_slow", 0444, d_spin_debug,
                           &spinlock_stats.released_slow);
        debugfs_create_u32("released_slow_kicked", 0444, d_spin_debug,
                           &spinlock_stats.released_slow_kicked);

        debugfs_create_u64("time_spinning", 0444, d_spin_debug,
                           &spinlock_stats.time_spinning);
        debugfs_create_u64("time_blocked", 0444, d_spin_debug,
                           &spinlock_stats.time_blocked);
        debugfs_create_u64("time_total", 0444, d_spin_debug,
                           &spinlock_stats.time_total);

        xen_debugfs_create_u32_array("histo_total", 0444, d_spin_debug,
                                     spinlock_stats.histo_spin_total, HISTO_BUCKETS + 1);
        xen_debugfs_create_u32_array("histo_spinning", 0444, d_spin_debug,
                                     spinlock_stats.histo_spin_spinning, HISTO_BUCKETS + 1);
        xen_debugfs_create_u32_array("histo_blocked", 0444, d_spin_debug,
                                     spinlock_stats.histo_spin_blocked, HISTO_BUCKETS + 1);

        return 0;
}
fs_initcall(xen_spinlock_debugfs);

#endif  /* CONFIG_XEN_DEBUG_FS */