author		Jeremy Fitzhardinge <jeremy@goop.org>	2013-08-09 10:21:56 -0400
committer	H. Peter Anvin <hpa@linux.intel.com>	2013-08-09 10:53:50 -0400
commit		4a1ed4ca681e7df38ed1b609a11aab38cbc515b3 (patch)
tree		54206caf9c98f99df114613a160d816e4a08f9e7 /arch/x86
parent		354714dd2607778692db53947ab93b74956494e5 (diff)
x86, pvticketlock: When paravirtualizing ticket locks, increment by 2
Increment ticket head/tail by 2 rather than 1 to leave the LSB free
to store an "is in slowpath state" bit. This halves the number of
possible CPUs for a given ticket size, but this shouldn't matter in
practice: kernels built for 32k+ CPU systems are probably specially
built for the hardware rather than being generic distro kernels.
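
For context, this is the layout the freed LSB is meant to enable. A minimal
standalone sketch, assuming u8 tickets; the TICKET_SLOWPATH_FLAG name and the
helpers are illustrative only, since the flag itself is introduced by a later
patch in this series rather than this one:

typedef unsigned char __ticket_t;            /* the u8 ticket case */

#define TICKET_LOCK_INC      2               /* tickets now advance in steps of 2 */
#define TICKET_SLOWPATH_FLAG ((__ticket_t)1) /* the LSB this patch leaves free */

/* With the LSB reserved, a ticket is a queue position in the upper
 * bits plus an optional "in slowpath state" flag in bit 0. */
static inline int ticket_in_slowpath(__ticket_t t)
{
        return t & TICKET_SLOWPATH_FLAG;     /* flag set: take the slow path */
}

static inline __ticket_t ticket_position(__ticket_t t)
{
        return t & ~TICKET_SLOWPATH_FLAG;    /* the actual head/tail value */
}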
Signed-off-by: Jeremy Fitzhardinge <jeremy@goop.org>
Link: http://lkml.kernel.org/r/1376058122-8248-9-git-send-email-raghavendra.kt@linux.vnet.ibm.com
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Tested-by: Attilio Rao <attilio.rao@citrix.com>
Signed-off-by: Raghavendra K T <raghavendra.kt@linux.vnet.ibm.com>
Acked-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
Diffstat (limited to 'arch/x86')
 arch/x86/include/asm/spinlock.h       | 10 +++++-----
 arch/x86/include/asm/spinlock_types.h | 10 +++++++++-
 2 files changed, 14 insertions(+), 6 deletions(-)
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
index 744241048a11..04a5cd5e97cb 100644
--- a/arch/x86/include/asm/spinlock.h
+++ b/arch/x86/include/asm/spinlock.h
@@ -78,7 +78,7 @@ static __always_inline void __ticket_unlock_kick(struct arch_spinlock *lock,
  */
 static __always_inline void arch_spin_lock(struct arch_spinlock *lock)
 {
-	register struct __raw_tickets inc = { .tail = 1 };
+	register struct __raw_tickets inc = { .tail = TICKET_LOCK_INC };
 
 	inc = xadd(&lock->tickets, inc);
 
@@ -104,7 +104,7 @@ static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
 	if (old.tickets.head != old.tickets.tail)
 		return 0;
 
-	new.head_tail = old.head_tail + (1 << TICKET_SHIFT);
+	new.head_tail = old.head_tail + (TICKET_LOCK_INC << TICKET_SHIFT);
 
 	/* cmpxchg is a full barrier, so nothing can move before it */
 	return cmpxchg(&lock->head_tail, old.head_tail, new.head_tail) == old.head_tail;
@@ -112,9 +112,9 @@ static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
 
 static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
-	__ticket_t next = lock->tickets.head + 1;
+	__ticket_t next = lock->tickets.head + TICKET_LOCK_INC;
 
-	__add(&lock->tickets.head, 1, UNLOCK_LOCK_PREFIX);
+	__add(&lock->tickets.head, TICKET_LOCK_INC, UNLOCK_LOCK_PREFIX);
 	__ticket_unlock_kick(lock, next);
 }
 
@@ -129,7 +129,7 @@ static inline int arch_spin_is_contended(arch_spinlock_t *lock)
 {
 	struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);
 
-	return (__ticket_t)(tmp.tail - tmp.head) > 1;
+	return (__ticket_t)(tmp.tail - tmp.head) > TICKET_LOCK_INC;
 }
 #define arch_spin_is_contended	arch_spin_is_contended
 
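
The arch_spin_is_contended() change falls out of the new increment: with
TICKET_LOCK_INC == 2, (tail - head) is 0 for an unlocked lock, 2 when held
with no waiters, and 4 or more once another CPU is queued, so "contended"
becomes "> TICKET_LOCK_INC". A small standalone check of that arithmetic,
including the unsigned wraparound the (__ticket_t) cast relies on
(illustrative, not kernel code):

#include <assert.h>

typedef unsigned char ticket_t;         /* mirrors __ticket_t in the u8 case */
#define TICKET_LOCK_INC 2

/* Contended: a gap of more than one ticket, i.e. a holder plus a waiter. */
static int is_contended(ticket_t head, ticket_t tail)
{
        return (ticket_t)(tail - head) > TICKET_LOCK_INC;
}

int main(void)
{
        assert(!is_contended(0, 0));    /* unlocked */
        assert(!is_contended(0, 2));    /* held, nobody waiting */
        assert(is_contended(0, 4));     /* held, one CPU spinning */
        assert(is_contended(254, 2));   /* still correct across u8 wraparound */
        return 0;
}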
diff --git a/arch/x86/include/asm/spinlock_types.h b/arch/x86/include/asm/spinlock_types.h
index 83fd3c75d45c..e96fcbdfbc07 100644
--- a/arch/x86/include/asm/spinlock_types.h
+++ b/arch/x86/include/asm/spinlock_types.h
@@ -3,7 +3,13 @@
 
 #include <linux/types.h>
 
-#if (CONFIG_NR_CPUS < 256)
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+#define __TICKET_LOCK_INC	2
+#else
+#define __TICKET_LOCK_INC	1
+#endif
+
+#if (CONFIG_NR_CPUS < (256 / __TICKET_LOCK_INC))
 typedef u8  __ticket_t;
 typedef u16 __ticketpair_t;
 #else
@@ -11,6 +17,8 @@ typedef u16 __ticket_t;
 typedef u32 __ticketpair_t;
 #endif
 
+#define TICKET_LOCK_INC	((__ticket_t)__TICKET_LOCK_INC)
+
 #define TICKET_SHIFT	(sizeof(__ticket_t) * 8)
 
 typedef struct arch_spinlock {
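
The moved type cutoff is capacity arithmetic: each CPU now consumes
TICKET_LOCK_INC ticket values, so an N-bit ticket supports
2^N / TICKET_LOCK_INC CPUs. That is 128 for u8 tickets and 32768 for u16,
the "32k+" figure in the commit message. A standalone restatement of the
bound (illustrative, not kernel code):

/* Each queued CPU consumes TICKET_LOCK_INC ticket values, so the
 * number of supportable CPUs per ticket width is halved. */
#define TICKET_LOCK_INC 2

_Static_assert(256 / TICKET_LOCK_INC == 128,
               "u8 tickets now cover at most 128 CPUs");
_Static_assert(65536 / TICKET_LOCK_INC == 32768,
               "u16 tickets now cover at most 32k CPUs");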