author     Ingo Molnar <mingo@elte.hu>  2008-07-21 10:45:56 -0400
committer  Ingo Molnar <mingo@elte.hu>  2008-07-21 10:45:56 -0400
commit     2e2dcc7631e331cf2e8396ce452e7f01e35f1182 (patch)
tree       5a02c9602db66bc8c8db9660899c0c4455d7464f
parent     acee709cab689ec7703770e8b8cb5cc3a4abcb31 (diff)
parent     1c29dd9a9e2f83ffb02e50bb3619c3b9db8fd526 (diff)
Merge branch 'x86/paravirt-spinlocks' into x86/for-linus
-rw-r--r--  arch/x86/kernel/Makefile          |   3
-rw-r--r--  arch/x86/kernel/paravirt.c        |  24
-rw-r--r--  arch/x86/xen/smp.c                | 171
-rw-r--r--  drivers/xen/events.c              |  27
-rw-r--r--  include/asm-x86/paravirt.h        |  43
-rw-r--r--  include/asm-x86/spinlock.h        | 118
-rw-r--r--  include/asm-x86/spinlock_types.h  |   2
-rw-r--r--  include/asm-x86/xen/events.h      |   1
-rw-r--r--  include/xen/events.h              |   7
9 files changed, 379 insertions, 17 deletions
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index b78a17b12810..3db651fc8ec5 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -7,9 +7,10 @@ extra-y := head_$(BITS).o head$(BITS).o head.o init_task.o vmlinu
 CPPFLAGS_vmlinux.lds += -U$(UTS_MACHINE)
 
 ifdef CONFIG_FTRACE
-# Do not profile debug utilities
+# Do not profile debug and lowlevel utilities
 CFLAGS_REMOVE_tsc.o = -pg
 CFLAGS_REMOVE_rtc.o = -pg
+CFLAGS_REMOVE_paravirt.o = -pg
 endif
 
 #
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index b4564d089b43..097d8a6797fa 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -124,6 +124,7 @@ static void *get_call_destination(u8 type)
 		.pv_irq_ops = pv_irq_ops,
 		.pv_apic_ops = pv_apic_ops,
 		.pv_mmu_ops = pv_mmu_ops,
+		.pv_lock_ops = pv_lock_ops,
 	};
 	return *((void **)&tmpl + type);
 }
@@ -267,6 +268,17 @@ enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
 	return __get_cpu_var(paravirt_lazy_mode);
 }
 
+void __init paravirt_use_bytelocks(void)
+{
+#ifdef CONFIG_SMP
+	pv_lock_ops.spin_is_locked = __byte_spin_is_locked;
+	pv_lock_ops.spin_is_contended = __byte_spin_is_contended;
+	pv_lock_ops.spin_lock = __byte_spin_lock;
+	pv_lock_ops.spin_trylock = __byte_spin_trylock;
+	pv_lock_ops.spin_unlock = __byte_spin_unlock;
+#endif
+}
+
 struct pv_info pv_info = {
 	.name = "bare hardware",
 	.paravirt_enabled = 0,
@@ -449,6 +461,18 @@ struct pv_mmu_ops pv_mmu_ops = {
 	.set_fixmap = native_set_fixmap,
 };
 
+struct pv_lock_ops pv_lock_ops = {
+#ifdef CONFIG_SMP
+	.spin_is_locked = __ticket_spin_is_locked,
+	.spin_is_contended = __ticket_spin_is_contended,
+
+	.spin_lock = __ticket_spin_lock,
+	.spin_trylock = __ticket_spin_trylock,
+	.spin_unlock = __ticket_spin_unlock,
+#endif
+};
+EXPORT_SYMBOL_GPL(pv_lock_ops);
+
 EXPORT_SYMBOL_GPL(pv_time_ops);
 EXPORT_SYMBOL    (pv_cpu_ops);
 EXPORT_SYMBOL    (pv_mmu_ops);
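
The two hunks above are the whole indirection: pv_lock_ops is a table of function pointers that boots pointing at the native ticket-lock routines, and paravirt_use_bytelocks() (or a hypervisor backend such as the Xen code further down) simply repoints its slots. A minimal stand-alone C model of that dispatch is sketched below; the names model_lock_ops, ticket_lock and byte_lock are illustrative only and are not part of this patch.

/* Stand-alone model of the pv_lock_ops dispatch; illustrative names only. */
#include <stdio.h>

struct model_lock_ops {
	void (*spin_lock)(int *lock);
	void (*spin_unlock)(int *lock);
};

static void ticket_lock(int *lock)   { printf("ticket lock\n");   *lock = 1; }
static void ticket_unlock(int *lock) { printf("ticket unlock\n"); *lock = 0; }
static void byte_lock(int *lock)     { printf("byte lock\n");     *lock = 1; }
static void byte_unlock(int *lock)   { printf("byte unlock\n");   *lock = 0; }

/* Default table: the native ticket implementation, as in pv_lock_ops above. */
static struct model_lock_ops lock_ops = {
	.spin_lock   = ticket_lock,
	.spin_unlock = ticket_unlock,
};

/* Analogue of paravirt_use_bytelocks(): repoint the table at boot time. */
static void use_bytelocks(void)
{
	lock_ops.spin_lock   = byte_lock;
	lock_ops.spin_unlock = byte_unlock;
}

int main(void)
{
	int lock = 0;

	lock_ops.spin_lock(&lock);	/* dispatches to ticket_lock */
	lock_ops.spin_unlock(&lock);

	use_bytelocks();
	lock_ops.spin_lock(&lock);	/* now dispatches to byte_lock */
	lock_ops.spin_unlock(&lock);
	return 0;
}
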
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index f702199312a5..e693812ac59a 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -15,6 +15,7 @@
  * This does not handle HOTPLUG_CPU yet.
  */
 #include <linux/sched.h>
+#include <linux/kernel_stat.h>
 #include <linux/err.h>
 #include <linux/smp.h>
 
@@ -35,6 +36,8 @@
 #include "xen-ops.h"
 #include "mmu.h"
 
+static void __cpuinit xen_init_lock_cpu(int cpu);
+
 cpumask_t xen_cpu_initialized_map;
 
 static DEFINE_PER_CPU(int, resched_irq);
@@ -179,6 +182,8 @@ static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
 {
 	unsigned cpu;
 
+	xen_init_lock_cpu(0);
+
 	smp_store_cpu_info(0);
 	cpu_data(0).x86_max_cores = 1;
 	set_cpu_sibling_map(0);
@@ -301,6 +306,7 @@ static int __cpuinit xen_cpu_up(unsigned int cpu)
 	clear_tsk_thread_flag(idle, TIF_FORK);
 #endif
 	xen_setup_timer(cpu);
+	xen_init_lock_cpu(cpu);
 
 	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
 
@@ -413,6 +419,170 @@ static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
+struct xen_spinlock {
+	unsigned char lock;		/* 0 -> free; 1 -> locked */
+	unsigned short spinners;	/* count of waiting cpus */
+};
+
+static int xen_spin_is_locked(struct raw_spinlock *lock)
+{
+	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
+
+	return xl->lock != 0;
+}
+
+static int xen_spin_is_contended(struct raw_spinlock *lock)
+{
+	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
+
+	/* Not strictly true; this is only the count of contended
+	   lock-takers entering the slow path. */
+	return xl->spinners != 0;
+}
+
+static int xen_spin_trylock(struct raw_spinlock *lock)
+{
+	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
+	u8 old = 1;
+
+	asm("xchgb %b0,%1"
+	    : "+q" (old), "+m" (xl->lock) : : "memory");
+
+	return old == 0;
+}
+
+static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
+static DEFINE_PER_CPU(struct xen_spinlock *, lock_spinners);
+
+static inline void spinning_lock(struct xen_spinlock *xl)
+{
+	__get_cpu_var(lock_spinners) = xl;
+	wmb();			/* set lock of interest before count */
+	asm(LOCK_PREFIX " incw %0"
+	    : "+m" (xl->spinners) : : "memory");
+}
+
+static inline void unspinning_lock(struct xen_spinlock *xl)
+{
+	asm(LOCK_PREFIX " decw %0"
+	    : "+m" (xl->spinners) : : "memory");
+	wmb();			/* decrement count before clearing lock */
+	__get_cpu_var(lock_spinners) = NULL;
+}
+
+static noinline int xen_spin_lock_slow(struct raw_spinlock *lock)
+{
+	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
+	int irq = __get_cpu_var(lock_kicker_irq);
+	int ret;
+
+	/* If kicker interrupts not initialized yet, just spin */
+	if (irq == -1)
+		return 0;
+
+	/* announce we're spinning */
+	spinning_lock(xl);
+
+	/* clear pending */
+	xen_clear_irq_pending(irq);
+
+	/* check again make sure it didn't become free while
+	   we weren't looking */
+	ret = xen_spin_trylock(lock);
+	if (ret)
+		goto out;
+
+	/* block until irq becomes pending */
+	xen_poll_irq(irq);
+	kstat_this_cpu.irqs[irq]++;
+
+out:
+	unspinning_lock(xl);
+	return ret;
+}
+
+static void xen_spin_lock(struct raw_spinlock *lock)
+{
+	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
+	int timeout;
+	u8 oldval;
+
+	do {
+		timeout = 1 << 10;
+
+		asm("1: xchgb %1,%0\n"
+		    "   testb %1,%1\n"
+		    "   jz 3f\n"
+		    "2: rep;nop\n"
+		    "   cmpb $0,%0\n"
+		    "   je 1b\n"
+		    "   dec %2\n"
+		    "   jnz 2b\n"
+		    "3:\n"
+		    : "+m" (xl->lock), "=q" (oldval), "+r" (timeout)
+		    : "1" (1)
+		    : "memory");
+
+	} while (unlikely(oldval != 0 && !xen_spin_lock_slow(lock)));
+}
+
+static noinline void xen_spin_unlock_slow(struct xen_spinlock *xl)
+{
+	int cpu;
+
+	for_each_online_cpu(cpu) {
+		/* XXX should mix up next cpu selection */
+		if (per_cpu(lock_spinners, cpu) == xl) {
+			xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR);
+			break;
+		}
+	}
+}
+
+static void xen_spin_unlock(struct raw_spinlock *lock)
+{
+	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
+
+	smp_wmb();		/* make sure no writes get moved after unlock */
+	xl->lock = 0;		/* release lock */
+
+	/* make sure unlock happens before kick */
+	barrier();
+
+	if (unlikely(xl->spinners))
+		xen_spin_unlock_slow(xl);
+}
+
+static __cpuinit void xen_init_lock_cpu(int cpu)
+{
+	int irq;
+	const char *name;
+
+	name = kasprintf(GFP_KERNEL, "spinlock%d", cpu);
+	irq = bind_ipi_to_irqhandler(XEN_SPIN_UNLOCK_VECTOR,
+				     cpu,
+				     xen_reschedule_interrupt,
+				     IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
+				     name,
+				     NULL);
+
+	if (irq >= 0) {
+		disable_irq(irq); /* make sure it's never delivered */
+		per_cpu(lock_kicker_irq, cpu) = irq;
+	}
+
+	printk("cpu %d spinlock event irq %d\n", cpu, irq);
+}
+
+static void __init xen_init_spinlocks(void)
+{
+	pv_lock_ops.spin_is_locked = xen_spin_is_locked;
+	pv_lock_ops.spin_is_contended = xen_spin_is_contended;
+	pv_lock_ops.spin_lock = xen_spin_lock;
+	pv_lock_ops.spin_trylock = xen_spin_trylock;
+	pv_lock_ops.spin_unlock = xen_spin_unlock;
+}
+
 static const struct smp_ops xen_smp_ops __initdata = {
 	.smp_prepare_boot_cpu = xen_smp_prepare_boot_cpu,
 	.smp_prepare_cpus = xen_smp_prepare_cpus,
@@ -430,4 +600,5 @@ void __init xen_smp_init(void)
 {
 	smp_ops = xen_smp_ops;
 	xen_fill_possible_map();
+	xen_init_spinlocks();
 }
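
The slow path above follows a "register, clear, re-check, then block" pattern: a waiter records itself in lock_spinners, clears its kicker event, re-checks the lock (closing the window in which the holder may already have released it), and only then blocks in xen_poll_irq() until the unlocker sends XEN_SPIN_UNLOCK_VECTOR. The sketch below models that handshake for a single waiter with C11 atomics; it is an illustration under those simplifying assumptions, not kernel code, and the busy-wait loop stands in for the SCHEDOP_poll hypercall.

/* Single-waiter model of the spin/block/kick protocol; illustrative only. */
#include <stdatomic.h>
#include <stdbool.h>

struct model_lock {
	atomic_uchar lock;	/* 0 -> free; 1 -> locked             */
	atomic_uint  spinners;	/* waiters that entered the slow path */
};

static atomic_bool kick_pending;	/* stands in for the waiter's event channel */

static bool model_trylock(struct model_lock *xl)
{
	unsigned char expected = 0;
	return atomic_compare_exchange_strong(&xl->lock, &expected, 1);
}

/* Mirrors the shape of xen_spin_lock_slow(): true if the lock was taken here. */
static bool model_lock_slow(struct model_lock *xl)
{
	bool ret;

	atomic_fetch_add(&xl->spinners, 1);	/* announce we're spinning    */
	atomic_store(&kick_pending, false);	/* clear any stale kick       */

	ret = model_trylock(xl);		/* re-check: the holder may   */
	if (ret)				/* have released it meanwhile */
		goto out;

	while (!atomic_load(&kick_pending))	/* "poll" until we are kicked */
		;
out:
	atomic_fetch_sub(&xl->spinners, 1);
	return ret;
}

void model_spin_lock(struct model_lock *xl)
{
	do {
		/* fast path: bounded spinning before falling back */
		for (int i = 0; i < (1 << 10); i++)
			if (model_trylock(xl))
				return;
	} while (!model_lock_slow(xl));
}

void model_spin_unlock(struct model_lock *xl)
{
	atomic_store(&xl->lock, 0);		/* release the lock             */
	if (atomic_load(&xl->spinners))		/* a waiter is blocked: kick it */
		atomic_store(&kick_pending, true);
}
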
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 332dd63750a0..0e0c28574af8 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -734,6 +734,33 @@ static void restore_cpu_ipis(unsigned int cpu)
 	}
 }
 
+/* Clear an irq's pending state, in preparation for polling on it */
+void xen_clear_irq_pending(int irq)
+{
+	int evtchn = evtchn_from_irq(irq);
+
+	if (VALID_EVTCHN(evtchn))
+		clear_evtchn(evtchn);
+}
+
+/* Poll waiting for an irq to become pending.  In the usual case, the
+   irq will be disabled so it won't deliver an interrupt. */
+void xen_poll_irq(int irq)
+{
+	evtchn_port_t evtchn = evtchn_from_irq(irq);
+
+	if (VALID_EVTCHN(evtchn)) {
+		struct sched_poll poll;
+
+		poll.nr_ports = 1;
+		poll.timeout = 0;
+		poll.ports = &evtchn;
+
+		if (HYPERVISOR_sched_op(SCHEDOP_poll, &poll) != 0)
+			BUG();
+	}
+}
+
 void xen_irq_resume(void)
 {
 	unsigned int cpu, irq, evtchn;
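
These two helpers are meant to be used as a pair, in the order the Xen spinlock slow path in arch/x86/xen/smp.c uses them: clear the stale pending state first, re-check the condition being waited for, and only then poll. A hedged sketch of that calling pattern follows; wait_for_kick() and its ready flag are hypothetical names introduced only for illustration, while xen_clear_irq_pending()/xen_poll_irq() are the functions added above.

static void wait_for_kick(int irq, const int *ready)
{
	xen_clear_irq_pending(irq);	/* 1. discard any stale pending event     */

	if (*ready)			/* 2. re-check only after clearing, so a  */
		return;			/*    concurrent kick cannot be lost      */

	xen_poll_irq(irq);		/* 3. block until the irq becomes pending */
}
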
diff --git a/include/asm-x86/paravirt.h b/include/asm-x86/paravirt.h
index 695ce9383f52..aec9767836b6 100644
--- a/include/asm-x86/paravirt.h
+++ b/include/asm-x86/paravirt.h
@@ -325,6 +325,15 @@ struct pv_mmu_ops {
 			   unsigned long phys, pgprot_t flags);
 };
 
+struct raw_spinlock;
+struct pv_lock_ops {
+	int (*spin_is_locked)(struct raw_spinlock *lock);
+	int (*spin_is_contended)(struct raw_spinlock *lock);
+	void (*spin_lock)(struct raw_spinlock *lock);
+	int (*spin_trylock)(struct raw_spinlock *lock);
+	void (*spin_unlock)(struct raw_spinlock *lock);
+};
+
 /* This contains all the paravirt structures: we get a convenient
  * number for each function using the offset which we use to indicate
  * what to patch. */
@@ -335,6 +344,7 @@ struct paravirt_patch_template {
 	struct pv_irq_ops pv_irq_ops;
 	struct pv_apic_ops pv_apic_ops;
 	struct pv_mmu_ops pv_mmu_ops;
+	struct pv_lock_ops pv_lock_ops;
 };
 
 extern struct pv_info pv_info;
@@ -344,6 +354,7 @@ extern struct pv_cpu_ops pv_cpu_ops;
 extern struct pv_irq_ops pv_irq_ops;
 extern struct pv_apic_ops pv_apic_ops;
 extern struct pv_mmu_ops pv_mmu_ops;
+extern struct pv_lock_ops pv_lock_ops;
 
 #define PARAVIRT_PATCH(x)	\
 	(offsetof(struct paravirt_patch_template, x) / sizeof(void *))
@@ -1368,6 +1379,37 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
 void _paravirt_nop(void);
 #define paravirt_nop	((void *)_paravirt_nop)
 
+void paravirt_use_bytelocks(void);
+
+#ifdef CONFIG_SMP
+
+static inline int __raw_spin_is_locked(struct raw_spinlock *lock)
+{
+	return PVOP_CALL1(int, pv_lock_ops.spin_is_locked, lock);
+}
+
+static inline int __raw_spin_is_contended(struct raw_spinlock *lock)
+{
+	return PVOP_CALL1(int, pv_lock_ops.spin_is_contended, lock);
+}
+
+static __always_inline void __raw_spin_lock(struct raw_spinlock *lock)
+{
+	PVOP_VCALL1(pv_lock_ops.spin_lock, lock);
+}
+
+static __always_inline int __raw_spin_trylock(struct raw_spinlock *lock)
+{
+	return PVOP_CALL1(int, pv_lock_ops.spin_trylock, lock);
+}
+
+static __always_inline void __raw_spin_unlock(struct raw_spinlock *lock)
+{
+	PVOP_VCALL1(pv_lock_ops.spin_unlock, lock);
+}
+
+#endif
+
 /* These all sit in the .parainstructions section to tell us what to patch. */
 struct paravirt_patch_site {
 	u8 *instr;		/* original instructions */
@@ -1452,6 +1494,7 @@ static inline unsigned long __raw_local_irq_save(void)
 	return f;
 }
 
+
 /* Make sure as little as possible of this mess escapes. */
 #undef PARAVIRT_CALL
 #undef __PVOP_CALL
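
Stripped of the patching and calling-convention machinery that PVOP_CALL1/PVOP_VCALL1 add, each new wrapper is conceptually just an indirect call through the slot added to the ops table. The sketch below shows that idea only; __raw_spin_lock_concept() is a hypothetical name, not part of this patch.

static __always_inline void __raw_spin_lock_concept(struct raw_spinlock *lock)
{
	/* native boot: __ticket_spin_lock; Xen guest: xen_spin_lock; etc. */
	pv_lock_ops.spin_lock(lock);
}
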
diff --git a/include/asm-x86/spinlock.h b/include/asm-x86/spinlock.h
index 21e89bf92f1c..4f9a9861799a 100644
--- a/include/asm-x86/spinlock.h
+++ b/include/asm-x86/spinlock.h
@@ -6,7 +6,7 @@
 #include <asm/page.h>
 #include <asm/processor.h>
 #include <linux/compiler.h>
-
+#include <asm/paravirt.h>
 /*
  * Your basic SMP spinlocks, allowing only a single CPU anywhere
  *
@@ -54,21 +54,21 @@
  * much between them in performance though, especially as locks are out of line.
  */
 #if (NR_CPUS < 256)
-static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
+static inline int __ticket_spin_is_locked(raw_spinlock_t *lock)
 {
 	int tmp = ACCESS_ONCE(lock->slock);
 
 	return (((tmp >> 8) & 0xff) != (tmp & 0xff));
 }
 
-static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
+static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
 {
 	int tmp = ACCESS_ONCE(lock->slock);
 
 	return (((tmp >> 8) & 0xff) - (tmp & 0xff)) > 1;
 }
 
-static __always_inline void __raw_spin_lock(raw_spinlock_t *lock)
+static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
 {
 	short inc = 0x0100;
 
@@ -87,9 +87,7 @@ static __always_inline void __raw_spin_lock(raw_spinlock_t *lock)
 		     : "memory", "cc");
 }
 
-#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
-
-static __always_inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
 {
 	int tmp;
 	short new;
@@ -110,7 +108,7 @@ static __always_inline int __raw_spin_trylock(raw_spinlock_t *lock)
 	return tmp;
 }
 
-static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
 {
 	asm volatile(UNLOCK_LOCK_PREFIX "incb %0"
 		     : "+m" (lock->slock)
@@ -118,21 +116,21 @@ static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock)
 		     : "memory", "cc");
 }
 #else
-static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
+static inline int __ticket_spin_is_locked(raw_spinlock_t *lock)
 {
 	int tmp = ACCESS_ONCE(lock->slock);
 
 	return (((tmp >> 16) & 0xffff) != (tmp & 0xffff));
 }
 
-static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
+static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
 {
 	int tmp = ACCESS_ONCE(lock->slock);
 
 	return (((tmp >> 16) & 0xffff) - (tmp & 0xffff)) > 1;
 }
 
-static __always_inline void __raw_spin_lock(raw_spinlock_t *lock)
+static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
 {
 	int inc = 0x00010000;
 	int tmp;
@@ -153,9 +151,7 @@ static __always_inline void __raw_spin_lock(raw_spinlock_t *lock)
 		     : "memory", "cc");
 }
 
-#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
-
-static __always_inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
 {
 	int tmp;
 	int new;
@@ -177,7 +173,7 @@ static __always_inline int __raw_spin_trylock(raw_spinlock_t *lock)
 	return tmp;
 }
 
-static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
 {
 	asm volatile(UNLOCK_LOCK_PREFIX "incw %0"
 		     : "+m" (lock->slock)
@@ -186,6 +182,98 @@ static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock)
 }
 #endif
 
+#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
+
+#ifdef CONFIG_PARAVIRT
+/*
+ * Define virtualization-friendly old-style lock byte lock, for use in
+ * pv_lock_ops if desired.
+ *
+ * This differs from the pre-2.6.24 spinlock by always using xchgb
+ * rather than decb to take the lock; this allows it to use a
+ * zero-initialized lock structure.  It also maintains a 1-byte
+ * contention counter, so that we can implement
+ * __byte_spin_is_contended.
+ */
+struct __byte_spinlock {
+	s8 lock;
+	s8 spinners;
+};
+
+static inline int __byte_spin_is_locked(raw_spinlock_t *lock)
+{
+	struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
+	return bl->lock != 0;
+}
+
+static inline int __byte_spin_is_contended(raw_spinlock_t *lock)
+{
+	struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
+	return bl->spinners != 0;
+}
+
+static inline void __byte_spin_lock(raw_spinlock_t *lock)
+{
+	struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
+	s8 val = 1;
+
+	asm("1: xchgb %1, %0\n"
+	    "   test %1,%1\n"
+	    "   jz 3f\n"
+	    "   " LOCK_PREFIX "incb %2\n"
+	    "2: rep;nop\n"
+	    "   cmpb $1, %0\n"
+	    "   je 2b\n"
+	    "   " LOCK_PREFIX "decb %2\n"
+	    "   jmp 1b\n"
+	    "3:"
+	    : "+m" (bl->lock), "+q" (val), "+m" (bl->spinners): : "memory");
+}
+
+static inline int __byte_spin_trylock(raw_spinlock_t *lock)
+{
+	struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
+	u8 old = 1;
+
+	asm("xchgb %1,%0"
+	    : "+m" (bl->lock), "+q" (old) : : "memory");
+
+	return old == 0;
+}
+
+static inline void __byte_spin_unlock(raw_spinlock_t *lock)
+{
+	struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
+	smp_wmb();
+	bl->lock = 0;
+}
+#else  /* !CONFIG_PARAVIRT */
+static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
+{
+	return __ticket_spin_is_locked(lock);
+}
+
+static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
+{
+	return __ticket_spin_is_contended(lock);
+}
+
+static __always_inline void __raw_spin_lock(raw_spinlock_t *lock)
+{
+	__ticket_spin_lock(lock);
+}
+
+static __always_inline int __raw_spin_trylock(raw_spinlock_t *lock)
+{
+	return __ticket_spin_trylock(lock);
+}
+
+static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock)
+{
+	__ticket_spin_unlock(lock);
+}
+#endif	/* CONFIG_PARAVIRT */
+
 static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
 {
 	while (__raw_spin_is_locked(lock))
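
For reference, the renamed __ticket_spin_* helpers (NR_CPUS < 256 variant) treat ->slock as two packed bytes: the low byte is the "now serving" number and the high byte is the "next ticket" counter, so the lock is held whenever the two differ. The stand-alone walk-through below illustrates only that encoding; the function names are illustrative, not kernel code.

#include <assert.h>

static int ticket_is_locked(unsigned int slock)
{
	return ((slock >> 8) & 0xff) != (slock & 0xff);	/* tail != head */
}

static int ticket_is_contended(unsigned int slock)
{
	return (((slock >> 8) & 0xff) - (slock & 0xff)) > 1;
}

int main(void)
{
	unsigned int slock = 0;			/* free: head == tail == 0        */
	assert(!ticket_is_locked(slock));

	slock += 0x0100;			/* CPU A takes ticket 0 and owns it */
	assert(ticket_is_locked(slock) && !ticket_is_contended(slock));

	slock += 0x0100;			/* CPU B takes ticket 1 and spins   */
	assert(ticket_is_contended(slock));

	slock += 1;				/* A unlocks: "incb" bumps head     */
	assert(ticket_is_locked(slock));	/* B (ticket 1) now owns the lock   */
	assert(!ticket_is_contended(slock));
	return 0;
}
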
diff --git a/include/asm-x86/spinlock_types.h b/include/asm-x86/spinlock_types.h
index 9029cf78cf5d..06c071c9eee9 100644
--- a/include/asm-x86/spinlock_types.h
+++ b/include/asm-x86/spinlock_types.h
@@ -5,7 +5,7 @@
 # error "please don't include this file directly"
 #endif
 
-typedef struct {
+typedef struct raw_spinlock {
 	unsigned int slock;
 } raw_spinlock_t;
 
diff --git a/include/asm-x86/xen/events.h b/include/asm-x86/xen/events.h
index f8d57ea1f05f..8ded74720024 100644
--- a/include/asm-x86/xen/events.h
+++ b/include/asm-x86/xen/events.h
@@ -5,6 +5,7 @@ enum ipi_vector {
 	XEN_RESCHEDULE_VECTOR,
 	XEN_CALL_FUNCTION_VECTOR,
 	XEN_CALL_FUNCTION_SINGLE_VECTOR,
+	XEN_SPIN_UNLOCK_VECTOR,
 
 	XEN_NR_IPIS,
 };
diff --git a/include/xen/events.h b/include/xen/events.h
index 67c4436554a9..4680ff3fbc91 100644
--- a/include/xen/events.h
+++ b/include/xen/events.h
@@ -44,4 +44,11 @@ extern void notify_remote_via_irq(int irq);
 
 extern void xen_irq_resume(void);
 
+/* Clear an irq's pending state, in preparation for polling on it */
+void xen_clear_irq_pending(int irq);
+
+/* Poll waiting for an irq to become pending.  In the usual case, the
+   irq will be disabled so it won't deliver an interrupt. */
+void xen_poll_irq(int irq);
+
 #endif /* _XEN_EVENTS_H */