diff options
| -rw-r--r-- | arch/x86/xen/spinlock.c | 27 |
1 file changed, 22 insertions, 5 deletions
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c index cc9b1e182fcf..d69cc6c3f808 100644 --- a/arch/x86/xen/spinlock.c +++ b/arch/x86/xen/spinlock.c | |||
| @@ -116,9 +116,26 @@ static inline void spin_time_accum_blocked(u64 start) | |||
| 116 | } | 116 | } |
| 117 | #endif /* CONFIG_XEN_DEBUG_FS */ | 117 | #endif /* CONFIG_XEN_DEBUG_FS */ |
| 118 | 118 | ||
| 119 | /* | ||
| 120 | * Size struct xen_spinlock so it's the same as arch_spinlock_t. | ||
| 121 | */ | ||
| 122 | #if NR_CPUS < 256 | ||
| 123 | typedef u8 xen_spinners_t; | ||
| 124 | # define inc_spinners(xl) \ | ||
| 125 | asm(LOCK_PREFIX " incb %0" : "+m" ((xl)->spinners) : : "memory"); | ||
| 126 | # define dec_spinners(xl) \ | ||
| 127 | asm(LOCK_PREFIX " decb %0" : "+m" ((xl)->spinners) : : "memory"); | ||
| 128 | #else | ||
| 129 | typedef u16 xen_spinners_t; | ||
| 130 | # define inc_spinners(xl) \ | ||
| 131 | asm(LOCK_PREFIX " incw %0" : "+m" ((xl)->spinners) : : "memory"); | ||
| 132 | # define dec_spinners(xl) \ | ||
| 133 | asm(LOCK_PREFIX " decw %0" : "+m" ((xl)->spinners) : : "memory"); | ||
| 134 | #endif | ||
| 135 | |||
| 119 | struct xen_spinlock { | 136 | struct xen_spinlock { |
| 120 | unsigned char lock; /* 0 -> free; 1 -> locked */ | 137 | unsigned char lock; /* 0 -> free; 1 -> locked */ |
| 121 | unsigned short spinners; /* count of waiting cpus */ | 138 | xen_spinners_t spinners; /* count of waiting cpus */ |
| 122 | }; | 139 | }; |
| 123 | 140 | ||
| 124 | static int xen_spin_is_locked(struct arch_spinlock *lock) | 141 | static int xen_spin_is_locked(struct arch_spinlock *lock) |
| @@ -164,8 +181,7 @@ static inline struct xen_spinlock *spinning_lock(struct xen_spinlock *xl) | |||
| 164 | 181 | ||
| 165 | wmb(); /* set lock of interest before count */ | 182 | wmb(); /* set lock of interest before count */ |
| 166 | 183 | ||
| 167 | asm(LOCK_PREFIX " incw %0" | 184 | inc_spinners(xl); |
| 168 | : "+m" (xl->spinners) : : "memory"); | ||
| 169 | 185 | ||
| 170 | return prev; | 186 | return prev; |
| 171 | } | 187 | } |
| @@ -176,8 +192,7 @@ static inline struct xen_spinlock *spinning_lock(struct xen_spinlock *xl) | |||
| 176 | */ | 192 | */ |
| 177 | static inline void unspinning_lock(struct xen_spinlock *xl, struct xen_spinlock *prev) | 193 | static inline void unspinning_lock(struct xen_spinlock *xl, struct xen_spinlock *prev) |
| 178 | { | 194 | { |
| 179 | asm(LOCK_PREFIX " decw %0" | 195 | dec_spinners(xl); |
| 180 | : "+m" (xl->spinners) : : "memory"); | ||
| 181 | wmb(); /* decrement count before restoring lock */ | 196 | wmb(); /* decrement count before restoring lock */ |
| 182 | __this_cpu_write(lock_spinners, prev); | 197 | __this_cpu_write(lock_spinners, prev); |
| 183 | } | 198 | } |
| @@ -373,6 +388,8 @@ void xen_uninit_lock_cpu(int cpu) | |||
| 373 | 388 | ||
| 374 | void __init xen_init_spinlocks(void) | 389 | void __init xen_init_spinlocks(void) |
| 375 | { | 390 | { |
| 391 | BUILD_BUG_ON(sizeof(struct xen_spinlock) > sizeof(arch_spinlock_t)); | ||
| 392 | |||
| 376 | pv_lock_ops.spin_is_locked = xen_spin_is_locked; | 393 | pv_lock_ops.spin_is_locked = xen_spin_is_locked; |
| 377 | pv_lock_ops.spin_is_contended = xen_spin_is_contended; | 394 | pv_lock_ops.spin_is_contended = xen_spin_is_contended; |
| 378 | pv_lock_ops.spin_lock = xen_spin_lock; | 395 | pv_lock_ops.spin_lock = xen_spin_lock; |
