author    David Vrabel <david.vrabel@citrix.com>    2012-01-23 14:32:25 -0500
committer Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>    2012-01-24 12:10:19 -0500
commit    7a7546b377bdaa25ac77f33d9433c59f259b9688 (patch)
tree      ab4b43b4b3b50913e8f3abfe98190d0e0a472bf5 /arch/x86
parent    8ea11f7f11c811d1f502c2dabc5259d447c2f2a0 (diff)
x86: xen: size struct xen_spinlock to always fit in arch_spinlock_t
If NR_CPUS < 256 then arch_spinlock_t is only 16 bits wide but struct
xen_spinlock is 32 bits. When a spin lock is contended and xl->spinners
is modified, the two bytes immediately after the spin lock would be
corrupted.

This is a regression caused by 84eb950db13ca40a0572ce9957e14723500943d6
(x86, ticketlock: Clean up types and accessors) which reduced the size
of arch_spinlock_t.

Fix this by making xl->spinners a u8 if NR_CPUS < 256. A BUILD_BUG_ON()
is also added to check that the sizes of the two structures are
compatible.

In many cases this was not noticeable as there would often be padding
bytes after the lock (e.g., if any of CONFIG_GENERIC_LOCKBREAK,
CONFIG_DEBUG_SPINLOCK, or CONFIG_DEBUG_LOCK_ALLOC were enabled).

The bnx2 driver is affected. In struct bnx2, phy_lock and indirect_lock
may have no padding after them. Contention on phy_lock would corrupt
indirect_lock, making it appear locked, and the driver would deadlock.

Signed-off-by: David Vrabel <david.vrabel@citrix.com>
Signed-off-by: Jeremy Fitzhardinge <jeremy@goop.org>
Acked-by: Ian Campbell <ian.campbell@citrix.com>
CC: stable@kernel.org #only 3.2
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
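To make the layout problem concrete, the following is a minimal userspace sketch (an illustration only, not kernel code; fake_arch_spinlock_t and the two xen_spinlock_* structs are simplified stand-ins for the real types, assuming NR_CPUS < 256):

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Simplified stand-in for the 16-bit ticket-lock arch_spinlock_t. */
typedef struct { uint8_t head, tail; } fake_arch_spinlock_t;	/* 2 bytes */

/* Old layout: spinners is a u16, aligned to offset 2, total 4 bytes. */
struct xen_spinlock_old {
	unsigned char lock;		/* 0 -> free; 1 -> locked */
	unsigned short spinners;	/* count of waiting cpus */
};

/* Fixed layout: spinners is a u8 at offset 1, no padding, total 2 bytes. */
struct xen_spinlock_new {
	unsigned char lock;
	uint8_t spinners;
};

int main(void)
{
	printf("arch_spinlock_t:  %zu bytes\n", sizeof(fake_arch_spinlock_t));
	printf("old xen_spinlock: %zu bytes (spinners at offset %zu)\n",
	       sizeof(struct xen_spinlock_old),
	       offsetof(struct xen_spinlock_old, spinners));
	printf("new xen_spinlock: %zu bytes (spinners at offset %zu)\n",
	       sizeof(struct xen_spinlock_new),
	       offsetof(struct xen_spinlock_new, spinners));
	/*
	 * A 16-bit "incw" on old->spinners writes bytes 2-3, which lie
	 * entirely outside the 2-byte arch_spinlock_t the struct is
	 * overlaid on -- exactly the corruption described above.
	 */
	return 0;
}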
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/xen/spinlock.c  27
1 file changed, 22 insertions(+), 5 deletions(-)
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index cc9b1e182fcf..d69cc6c3f808 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -116,9 +116,26 @@ static inline void spin_time_accum_blocked(u64 start)
 }
 #endif /* CONFIG_XEN_DEBUG_FS */
 
+/*
+ * Size struct xen_spinlock so it's the same as arch_spinlock_t.
+ */
+#if NR_CPUS < 256
+typedef u8 xen_spinners_t;
+# define inc_spinners(xl) \
+	asm(LOCK_PREFIX " incb %0" : "+m" ((xl)->spinners) : : "memory");
+# define dec_spinners(xl) \
+	asm(LOCK_PREFIX " decb %0" : "+m" ((xl)->spinners) : : "memory");
+#else
+typedef u16 xen_spinners_t;
+# define inc_spinners(xl) \
+	asm(LOCK_PREFIX " incw %0" : "+m" ((xl)->spinners) : : "memory");
+# define dec_spinners(xl) \
+	asm(LOCK_PREFIX " decw %0" : "+m" ((xl)->spinners) : : "memory");
+#endif
+
 struct xen_spinlock {
 	unsigned char lock;		/* 0 -> free; 1 -> locked */
-	unsigned short spinners;	/* count of waiting cpus */
+	xen_spinners_t spinners;	/* count of waiting cpus */
 };
 
 static int xen_spin_is_locked(struct arch_spinlock *lock)
@@ -164,8 +181,7 @@ static inline struct xen_spinlock *spinning_lock(struct xen_spinlock *xl)
 
 	wmb();			/* set lock of interest before count */
 
-	asm(LOCK_PREFIX " incw %0"
-	    : "+m" (xl->spinners) : : "memory");
+	inc_spinners(xl);
 
 	return prev;
 }
@@ -176,8 +192,7 @@ static inline struct xen_spinlock *spinning_lock(struct xen_spinlock *xl)
  */
 static inline void unspinning_lock(struct xen_spinlock *xl, struct xen_spinlock *prev)
 {
-	asm(LOCK_PREFIX " decw %0"
-	    : "+m" (xl->spinners) : : "memory");
+	dec_spinners(xl);
 	wmb();			/* decrement count before restoring lock */
 	__this_cpu_write(lock_spinners, prev);
 }
@@ -373,6 +388,8 @@ void xen_uninit_lock_cpu(int cpu)
 
 void __init xen_init_spinlocks(void)
 {
+	BUILD_BUG_ON(sizeof(struct xen_spinlock) > sizeof(arch_spinlock_t));
+
 	pv_lock_ops.spin_is_locked = xen_spin_is_locked;
 	pv_lock_ops.spin_is_contended = xen_spin_is_contended;
 	pv_lock_ops.spin_lock = xen_spin_lock;
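
The BUILD_BUG_ON() added in the last hunk turns any future size mismatch into a compile-time failure rather than silent runtime corruption. As a rough sketch of the idea only, here is a C11 userspace analogue (MY_BUILD_BUG_ON and struct two_bytes are hypothetical names; the kernel's own BUILD_BUG_ON is implemented differently):

/* Userspace analogue of the kernel's compile-time size check:
 * compilation fails when the guarded condition is true. */
#define MY_BUILD_BUG_ON(cond) _Static_assert(!(cond), #cond)

struct two_bytes { unsigned char lock, spinners; };

/* Compiles only while the struct fits in two bytes. */
MY_BUILD_BUG_ON(sizeof(struct two_bytes) > 2);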