author     Linus Torvalds <torvalds@linux-foundation.org>   2013-09-04 14:55:10 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2013-09-04 14:55:10 -0400
commit     816434ec4a674fcdb3c2221a6dffdc8f34020550 (patch)
tree       6b8a319171270b20bf1b2e1c98d333f47988553a /arch/x86/include/asm
parent     f357a82048ff1e5645861475b014570e11ad1911 (diff)
parent     36bd621337c91a1ecda588e5bbbae8dd9698bae7 (diff)
Merge branch 'x86-spinlocks-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 spinlock changes from Ingo Molnar:
 "The biggest change here are paravirtualized ticket spinlocks (PV
  spinlocks), which bring a nice speedup on various benchmarks.

  The KVM host side will come to you via the KVM tree"

* 'x86-spinlocks-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/kvm/guest: Fix sparse warning: "symbol 'klock_waiting' was not declared as static"
  kvm: Paravirtual ticketlocks support for linux guests running on KVM hypervisor
  kvm guest: Add configuration support to enable debug information for KVM Guests
  kvm uapi: Add KICK_CPU and PV_UNHALT definition to uapi
  xen, pvticketlock: Allow interrupts to be enabled while blocking
  x86, ticketlock: Add slowpath logic
  jump_label: Split jumplabel ratelimit
  x86, pvticketlock: When paravirtualizing ticket locks, increment by 2
  x86, pvticketlock: Use callee-save for lock_spinning
  xen, pvticketlocks: Add xen_nopvspin parameter to disable xen pv ticketlocks
  xen, pvticketlock: Xen implementation for PV ticket locks
  xen: Defer spinlock setup until boot CPU setup
  x86, ticketlock: Collapse a layer of functions
  x86, ticketlock: Don't inline _spin_unlock when using paravirt spinlocks
  x86, spinlock: Replace pv spinlocks with pv ticketlocks
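The idea behind the series, in rough outline: a ticket lock spins for a bounded number of iterations, and only if the lock is still not free does the guest ask the hypervisor to put the waiting vCPU to sleep; the unlocker then kicks the blocked waiter. The sketch below is a simplified, self-contained illustration of that scheme, not the kernel code in the diff — the lock type, the hv_wait()/hv_kick() hooks and the threshold are made up for the example; the real implementation routes these through pv_lock_ops and only kicks a waiter that has flagged itself into the slowpath.

/* Simplified PV ticket lock sketch (illustrative only, not kernel code). */
#include <stdatomic.h>
#include <stdint.h>

#define SPIN_THRESHOLD (1u << 15)       /* spins before asking the hypervisor to block */

struct pv_ticket_lock {
        _Atomic uint16_t head;          /* ticket currently being served */
        _Atomic uint16_t tail;          /* next ticket to hand out */
};

/* Stand-ins for the paravirt hooks; a real guest would hypercall here. */
static void hv_wait(struct pv_ticket_lock *lock, uint16_t ticket) { (void)lock; (void)ticket; }
static void hv_kick(struct pv_ticket_lock *lock, uint16_t ticket) { (void)lock; (void)ticket; }

static void pv_ticket_lock(struct pv_ticket_lock *lock)
{
        uint16_t me = atomic_fetch_add(&lock->tail, 1);        /* take a ticket */

        for (;;) {
                unsigned int count = SPIN_THRESHOLD;

                do {                                            /* fast path: busy-wait a while */
                        if (atomic_load(&lock->head) == me)
                                return;
                } while (--count);

                hv_wait(lock, me);                              /* slow path: block in the hypervisor */
        }
}

static void pv_ticket_unlock(struct pv_ticket_lock *lock)
{
        uint16_t next = atomic_fetch_add(&lock->head, 1) + 1;

        /* Unconditional kick here; the kernel only kicks when the slowpath flag is set. */
        hv_kick(lock, next);
}

int main(void)
{
        struct pv_ticket_lock lock = { 0 };

        pv_ticket_lock(&lock);          /* uncontended: acquired on the fast path */
        pv_ticket_unlock(&lock);
        return 0;
}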
Diffstat (limited to 'arch/x86/include/asm')
-rw-r--r--  arch/x86/include/asm/kvm_para.h          14
-rw-r--r--  arch/x86/include/asm/paravirt.h          32
-rw-r--r--  arch/x86/include/asm/paravirt_types.h    14
-rw-r--r--  arch/x86/include/asm/spinlock.h         128
-rw-r--r--  arch/x86/include/asm/spinlock_types.h    16
5 files changed, 124 insertions(+), 80 deletions(-)
diff --git a/arch/x86/include/asm/kvm_para.h b/arch/x86/include/asm/kvm_para.h
index 0644129a5333..1df115909758 100644
--- a/arch/x86/include/asm/kvm_para.h
+++ b/arch/x86/include/asm/kvm_para.h
@@ -112,10 +112,20 @@ void kvm_async_pf_task_wait(u32 token);
 void kvm_async_pf_task_wake(u32 token);
 u32 kvm_read_and_reset_pf_reason(void);
 extern void kvm_disable_steal_time(void);
-#else
-#define kvm_guest_init() do { } while (0)
+
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+void __init kvm_spinlock_init(void);
+#else /* !CONFIG_PARAVIRT_SPINLOCKS */
+static inline void kvm_spinlock_init(void)
+{
+}
+#endif /* CONFIG_PARAVIRT_SPINLOCKS */
+
+#else /* CONFIG_KVM_GUEST */
+#define kvm_guest_init() do {} while (0)
 #define kvm_async_pf_task_wait(T) do {} while(0)
 #define kvm_async_pf_task_wake(T) do {} while(0)
+
 static inline u32 kvm_read_and_reset_pf_reason(void)
 {
         return 0;
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index cfdc9ee4c900..401f350ef71b 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -712,36 +712,16 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
 
 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
 
-static inline int arch_spin_is_locked(struct arch_spinlock *lock)
+static __always_inline void __ticket_lock_spinning(struct arch_spinlock *lock,
+                                                   __ticket_t ticket)
 {
-        return PVOP_CALL1(int, pv_lock_ops.spin_is_locked, lock);
+        PVOP_VCALLEE2(pv_lock_ops.lock_spinning, lock, ticket);
 }
 
-static inline int arch_spin_is_contended(struct arch_spinlock *lock)
+static __always_inline void __ticket_unlock_kick(struct arch_spinlock *lock,
+                                                 __ticket_t ticket)
 {
-        return PVOP_CALL1(int, pv_lock_ops.spin_is_contended, lock);
-}
-#define arch_spin_is_contended arch_spin_is_contended
-
-static __always_inline void arch_spin_lock(struct arch_spinlock *lock)
-{
-        PVOP_VCALL1(pv_lock_ops.spin_lock, lock);
-}
-
-static __always_inline void arch_spin_lock_flags(struct arch_spinlock *lock,
-                                                 unsigned long flags)
-{
-        PVOP_VCALL2(pv_lock_ops.spin_lock_flags, lock, flags);
-}
-
-static __always_inline int arch_spin_trylock(struct arch_spinlock *lock)
-{
-        return PVOP_CALL1(int, pv_lock_ops.spin_trylock, lock);
-}
-
-static __always_inline void arch_spin_unlock(struct arch_spinlock *lock)
-{
-        PVOP_VCALL1(pv_lock_ops.spin_unlock, lock);
+        PVOP_VCALL2(pv_lock_ops.unlock_kick, lock, ticket);
 }
 
 #endif
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index 0617ff241e8f..aab8f671b523 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -327,13 +327,15 @@ struct pv_mmu_ops {
 };
 
 struct arch_spinlock;
+#ifdef CONFIG_SMP
+#include <asm/spinlock_types.h>
+#else
+typedef u16 __ticket_t;
+#endif
+
 struct pv_lock_ops {
-        int (*spin_is_locked)(struct arch_spinlock *lock);
-        int (*spin_is_contended)(struct arch_spinlock *lock);
-        void (*spin_lock)(struct arch_spinlock *lock);
-        void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
-        int (*spin_trylock)(struct arch_spinlock *lock);
-        void (*spin_unlock)(struct arch_spinlock *lock);
+        struct paravirt_callee_save lock_spinning;
+        void (*unlock_kick)(struct arch_spinlock *lock, __ticket_t ticket);
 };
 
 /* This contains all the paravirt structures: we get a convenient
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
index e0e668422c75..bf156ded74b5 100644
--- a/arch/x86/include/asm/spinlock.h
+++ b/arch/x86/include/asm/spinlock.h
@@ -1,11 +1,14 @@
 #ifndef _ASM_X86_SPINLOCK_H
 #define _ASM_X86_SPINLOCK_H
 
+#include <linux/jump_label.h>
 #include <linux/atomic.h>
 #include <asm/page.h>
 #include <asm/processor.h>
 #include <linux/compiler.h>
 #include <asm/paravirt.h>
+#include <asm/bitops.h>
+
 /*
  * Your basic SMP spinlocks, allowing only a single CPU anywhere
  *
@@ -34,6 +37,31 @@
 # define UNLOCK_LOCK_PREFIX
 #endif
 
+/* How long a lock should spin before we consider blocking */
+#define SPIN_THRESHOLD  (1 << 15)
+
+extern struct static_key paravirt_ticketlocks_enabled;
+static __always_inline bool static_key_false(struct static_key *key);
+
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+
+static inline void __ticket_enter_slowpath(arch_spinlock_t *lock)
+{
+        set_bit(0, (volatile unsigned long *)&lock->tickets.tail);
+}
+
+#else /* !CONFIG_PARAVIRT_SPINLOCKS */
+static __always_inline void __ticket_lock_spinning(arch_spinlock_t *lock,
+                                                   __ticket_t ticket)
+{
+}
+static inline void __ticket_unlock_kick(arch_spinlock_t *lock,
+                                        __ticket_t ticket)
+{
+}
+
+#endif /* CONFIG_PARAVIRT_SPINLOCKS */
+
 static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
 {
         return lock.tickets.head == lock.tickets.tail;
@@ -52,81 +80,101 @@ static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
  * in the high part, because a wide xadd increment of the low part would carry
  * up and contaminate the high part.
  */
-static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
+static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
 {
-        register struct __raw_tickets inc = { .tail = 1 };
+        register struct __raw_tickets inc = { .tail = TICKET_LOCK_INC };
 
         inc = xadd(&lock->tickets, inc);
+        if (likely(inc.head == inc.tail))
+                goto out;
 
+        inc.tail &= ~TICKET_SLOWPATH_FLAG;
         for (;;) {
-                if (inc.head == inc.tail)
-                        break;
-                cpu_relax();
-                inc.head = ACCESS_ONCE(lock->tickets.head);
+                unsigned count = SPIN_THRESHOLD;
+
+                do {
+                        if (ACCESS_ONCE(lock->tickets.head) == inc.tail)
+                                goto out;
+                        cpu_relax();
+                } while (--count);
+                __ticket_lock_spinning(lock, inc.tail);
         }
-        barrier();      /* make sure nothing creeps before the lock is taken */
+out:    barrier();      /* make sure nothing creeps before the lock is taken */
 }
 
-static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
+static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
 {
         arch_spinlock_t old, new;
 
         old.tickets = ACCESS_ONCE(lock->tickets);
-        if (old.tickets.head != old.tickets.tail)
+        if (old.tickets.head != (old.tickets.tail & ~TICKET_SLOWPATH_FLAG))
                 return 0;
 
-        new.head_tail = old.head_tail + (1 << TICKET_SHIFT);
+        new.head_tail = old.head_tail + (TICKET_LOCK_INC << TICKET_SHIFT);
 
         /* cmpxchg is a full barrier, so nothing can move before it */
         return cmpxchg(&lock->head_tail, old.head_tail, new.head_tail) == old.head_tail;
 }
 
-static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
+static inline void __ticket_unlock_slowpath(arch_spinlock_t *lock,
+                                            arch_spinlock_t old)
 {
-        __add(&lock->tickets.head, 1, UNLOCK_LOCK_PREFIX);
+        arch_spinlock_t new;
+
+        BUILD_BUG_ON(((__ticket_t)NR_CPUS) != NR_CPUS);
+
+        /* Perform the unlock on the "before" copy */
+        old.tickets.head += TICKET_LOCK_INC;
+
+        /* Clear the slowpath flag */
+        new.head_tail = old.head_tail & ~(TICKET_SLOWPATH_FLAG << TICKET_SHIFT);
+
+        /*
+         * If the lock is uncontended, clear the flag - use cmpxchg in
+         * case it changes behind our back though.
+         */
+        if (new.tickets.head != new.tickets.tail ||
+            cmpxchg(&lock->head_tail, old.head_tail,
+                    new.head_tail) != old.head_tail) {
+                /*
+                 * Lock still has someone queued for it, so wake up an
+                 * appropriate waiter.
+                 */
+                __ticket_unlock_kick(lock, old.tickets.head);
+        }
 }
 
-static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
+static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
-        struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);
+        if (TICKET_SLOWPATH_FLAG &&
+            static_key_false(&paravirt_ticketlocks_enabled)) {
+                arch_spinlock_t prev;
 
-        return tmp.tail != tmp.head;
-}
+                prev = *lock;
+                add_smp(&lock->tickets.head, TICKET_LOCK_INC);
 
-static inline int __ticket_spin_is_contended(arch_spinlock_t *lock)
-{
-        struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);
+                /* add_smp() is a full mb() */
 
-        return (__ticket_t)(tmp.tail - tmp.head) > 1;
+                if (unlikely(lock->tickets.tail & TICKET_SLOWPATH_FLAG))
+                        __ticket_unlock_slowpath(lock, prev);
+        } else
+                __add(&lock->tickets.head, TICKET_LOCK_INC, UNLOCK_LOCK_PREFIX);
 }
 
-#ifndef CONFIG_PARAVIRT_SPINLOCKS
-
 static inline int arch_spin_is_locked(arch_spinlock_t *lock)
 {
-        return __ticket_spin_is_locked(lock);
-}
-
-static inline int arch_spin_is_contended(arch_spinlock_t *lock)
-{
-        return __ticket_spin_is_contended(lock);
-}
-#define arch_spin_is_contended arch_spin_is_contended
+        struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);
 
-static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
-{
-        __ticket_spin_lock(lock);
+        return tmp.tail != tmp.head;
 }
 
-static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
+static inline int arch_spin_is_contended(arch_spinlock_t *lock)
 {
-        return __ticket_spin_trylock(lock);
-}
+        struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);
 
-static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
-{
-        __ticket_spin_unlock(lock);
+        return (__ticket_t)(tmp.tail - tmp.head) > TICKET_LOCK_INC;
 }
+#define arch_spin_is_contended arch_spin_is_contended
 
 static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
                                                  unsigned long flags)
@@ -134,8 +182,6 @@ static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
         arch_spin_lock(lock);
 }
 
-#endif /* CONFIG_PARAVIRT_SPINLOCKS */
-
 static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
 {
         while (arch_spin_is_locked(lock))
diff --git a/arch/x86/include/asm/spinlock_types.h b/arch/x86/include/asm/spinlock_types.h
index ad0ad07fc006..4f1bea19945b 100644
--- a/arch/x86/include/asm/spinlock_types.h
+++ b/arch/x86/include/asm/spinlock_types.h
@@ -1,13 +1,17 @@
 #ifndef _ASM_X86_SPINLOCK_TYPES_H
 #define _ASM_X86_SPINLOCK_TYPES_H
 
-#ifndef __LINUX_SPINLOCK_TYPES_H
-# error "please don't include this file directly"
-#endif
-
 #include <linux/types.h>
 
-#if (CONFIG_NR_CPUS < 256)
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+#define __TICKET_LOCK_INC       2
+#define TICKET_SLOWPATH_FLAG    ((__ticket_t)1)
+#else
+#define __TICKET_LOCK_INC       1
+#define TICKET_SLOWPATH_FLAG    ((__ticket_t)0)
+#endif
+
+#if (CONFIG_NR_CPUS < (256 / __TICKET_LOCK_INC))
 typedef u8  __ticket_t;
 typedef u16 __ticketpair_t;
 #else
@@ -15,6 +19,8 @@ typedef u16 __ticket_t;
 typedef u32 __ticketpair_t;
 #endif
 
+#define TICKET_LOCK_INC ((__ticket_t)__TICKET_LOCK_INC)
+
 #define TICKET_SHIFT    (sizeof(__ticket_t) * 8)
 
 typedef struct arch_spinlock {
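One detail worth calling out from the spinlock_types.h change above: with CONFIG_PARAVIRT_SPINLOCKS the ticket increment becomes 2, so valid tickets only ever take even values and bit 0 of the tail is free to serve as TICKET_SLOWPATH_FLAG; this also halves the range of a u8 ticket, which is why the u8/u16 cutoff moves from 256 CPUs to 256 / __TICKET_LOCK_INC. A small standalone check of that encoding (illustrative only, not kernel code):

/* Sanity-check the "even tickets, bit 0 = slowpath flag" encoding (illustrative only). */
#include <assert.h>
#include <stdint.h>

#define TICKET_LOCK_INC       2   /* tickets advance by 2 under PV spinlocks */
#define TICKET_SLOWPATH_FLAG  1   /* bit 0 of the tail marks "a waiter has blocked" */

int main(void)
{
        uint8_t tail = 0;

        /* Hand out a few tickets: every ticket value stays even... */
        for (int i = 0; i < 5; i++) {
                assert((tail & TICKET_SLOWPATH_FLAG) == 0);
                tail = (uint8_t)(tail + TICKET_LOCK_INC);
        }

        /* ...so a blocked waiter can set bit 0 without corrupting any ticket. */
        tail |= TICKET_SLOWPATH_FLAG;
        assert((tail & ~TICKET_SLOWPATH_FLAG) == 5 * TICKET_LOCK_INC);

        return 0;
}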