 arch/x86/xen/spinlock.c | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index 2f91e565192..36a5141108d 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -326,8 +326,13 @@ static void xen_spin_unlock(struct raw_spinlock *lock)
 	smp_wmb();		/* make sure no writes get moved after unlock */
 	xl->lock = 0;		/* release lock */
 
-	/* make sure unlock happens before kick */
-	barrier();
+	/*
+	 * Make sure unlock happens before checking for waiting
+	 * spinners.  We need a strong barrier to enforce the
+	 * write-read ordering to different memory locations, as the
+	 * CPU makes no implied guarantees about their ordering.
+	 */
+	mb();
 
 	if (unlikely(xl->spinners))
 		xen_spin_unlock_slow(xl);
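
The new comment describes the classic store-load reordering hazard: on x86, a store may sit in the CPU's store buffer while a later load to a different address completes, so a compiler-only barrier() cannot order "write lock, then read spinners". Below is a minimal, self-contained C sketch of the hazard, not the kernel's code; kick_waiter() and the standalone barrier()/mb() macros are illustrative assumptions modeled on the x86 Linux definitions, and the struct layout mirrors the era's struct xen_spinlock.

/* Compiler-only barrier: constrains the compiler, not the CPU. */
#define barrier()  asm volatile("" ::: "memory")

/* Full barrier: on x86, mfence also orders this CPU's earlier
 * store before its later load. */
#define mb()       asm volatile("mfence" ::: "memory")

/* Illustrative stand-in for the kernel's struct xen_spinlock. */
struct xen_spinlock {
        unsigned char  lock;     /* 0 -> free; 1 -> locked */
        unsigned short spinners; /* count of waiting CPUs */
};

/* Hypothetical placeholder for the hypervisor kick. */
static void kick_waiter(struct xen_spinlock *xl) { (void)xl; }

/* CPU A: the unlock path from the patch. */
static void unlock_path(struct xen_spinlock *xl)
{
        xl->lock = 0;   /* store: release the lock */
        /*
         * With only barrier() here, the store above can still sit
         * in CPU A's store buffer while the load of xl->spinners
         * below completes.  CPU A then reads a stale spinners == 0
         * and skips the kick, while CPU B, which incremented
         * spinners and then re-read xl->lock before the store
         * became visible, sees the lock still held and blocks in
         * the hypervisor with no kick coming.  mb() closes that
         * window by forcing the store to be globally visible
         * before the load executes.
         */
        mb();
        if (xl->spinners)
                kick_waiter(xl);
}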
