-rw-r--r--  arch/x86/include/asm/spinlock.h |  4 ----
-rw-r--r--  include/linux/spinlock.h        | 14 +++++++++++---
-rw-r--r--  kernel/sched/core.c             | 14 +++++++++++++-
3 files changed, 24 insertions, 8 deletions
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
index 33692eaabab5..e3ddd7db723f 100644
--- a/arch/x86/include/asm/spinlock.h
+++ b/arch/x86/include/asm/spinlock.h
@@ -233,8 +233,4 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
 #define arch_read_relax(lock) cpu_relax()
 #define arch_write_relax(lock) cpu_relax()
 
-/* The {read|write|spin}_lock() on x86 are full memory barriers. */
-static inline void smp_mb__after_lock(void) { }
-#define ARCH_HAS_SMP_MB_AFTER_LOCK
-
 #endif /* _ASM_X86_SPINLOCK_H */
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 7d537ced949a..75f34949d9ab 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -117,9 +117,17 @@ do { \
 #endif /*arch_spin_is_contended*/
 #endif
 
-/* The lock does not imply full memory barrier. */
-#ifndef ARCH_HAS_SMP_MB_AFTER_LOCK
-static inline void smp_mb__after_lock(void) { smp_mb(); }
+/*
+ * Despite its name it doesn't necessarily have to be a full barrier.
+ * It should only guarantee that a STORE before the critical section
+ * can not be reordered with a LOAD inside this section.
+ * spin_lock() is a one-way barrier: this LOAD can not escape out
+ * of the region. So the default implementation simply ensures that
+ * a STORE can not move into the critical section; smp_wmb() should
+ * serialize it with another STORE done by spin_lock().
+ */
+#ifndef smp_mb__before_spinlock
+#define smp_mb__before_spinlock() smp_wmb()
 #endif
 
 /**
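
For context beyond the patch itself: the guarantee the new comment describes is that a STORE issued before smp_mb__before_spinlock() cannot be reordered with a LOAD performed inside the critical section that follows. A minimal sketch of a caller relying on that guarantee is shown below; the waker_example structure and function are hypothetical and exist only to illustrate the pattern.

#include <linux/sched.h>
#include <linux/spinlock.h>

/* Hypothetical illustration only; not part of this patch. */
struct waker_example {
        raw_spinlock_t          lock;
        struct task_struct      *waiter;        /* task sleeping on ->cond */
        int                     cond;
};

static void waker_example_signal(struct waker_example *w)
{
        struct task_struct *p;

        w->cond = 1;                    /* STORE before the critical section */
        smp_mb__before_spinlock();      /* keeps that STORE ordered against... */
        raw_spin_lock(&w->lock);
        p = w->waiter;                  /* ...this LOAD inside the section */
        if (p)
                wake_up_process(p);
        raw_spin_unlock(&w->lock);
}

The two scheduler hunks that follow place the helper in exactly this position: after the caller's store, immediately before the lock whose critical section performs the load.
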
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index b7c32cb7bfeb..ef51b0ef4bdc 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1491,7 +1491,13 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
         unsigned long flags;
         int cpu, success = 0;
 
-        smp_wmb();
+        /*
+         * If we are going to wake up a thread waiting for CONDITION we
+         * need to ensure that the CONDITION=1 store done by the caller
+         * can not be reordered with the p->state check below. This pairs
+         * with the mb() in set_current_state() done by the waiting thread.
+         */
+        smp_mb__before_spinlock();
         raw_spin_lock_irqsave(&p->pi_lock, flags);
         if (!(p->state & state))
                 goto out;
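
The mb() the comment refers to lives in set_current_state(); the waiting side is the canonical sleep/wakeup loop. A sketch (not taken from this commit; CONDITION stands for whatever flag the caller is waiting on):

for (;;) {
        set_current_state(TASK_INTERRUPTIBLE);  /* ->state store plus full mb() */
        if (CONDITION)                          /* load of the caller's flag */
                break;
        schedule();
}
__set_current_state(TASK_RUNNING);

Either the waiter's CONDITION load sees the waker's store and it never sleeps, or the waker's p->state check under ->pi_lock sees TASK_INTERRUPTIBLE and wakes it; the paired barriers rule out both sides missing each other.
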
@@ -2394,6 +2400,12 @@ need_resched:
         if (sched_feat(HRTICK))
                 hrtick_clear(rq);
 
+        /*
+         * Make sure that signal_pending_state()->signal_pending() below
+         * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE)
+         * done by the caller to avoid the race with signal_wake_up().
+         */
+        smp_mb__before_spinlock();
         raw_spin_lock_irq(&rq->lock);
 
         switch_count = &prev->nivcsw;
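
For the second hunk, the race with signal_wake_up() can be pictured as follows; this timeline is reconstructed from the comment and is an illustration, not text from the commit:

/*
 * CPU 0 (prepares to sleep)                 CPU 1 (sends a signal)
 *
 * __set_current_state(TASK_INTERRUPTIBLE);
 *                                           set_tsk_thread_flag(p, TIF_SIGPENDING);
 *                                           signal_wake_up() -> try_to_wake_up()
 *                                             reads the old p->state and
 *                                             therefore skips the wakeup
 * schedule()
 *   signal_pending_state() load satisfied
 *   early, before the ->state store, so
 *   TIF_SIGPENDING is not seen either
 *
 * Both sides miss each other and the task sleeps with a pending
 * signal.  The smp_mb__before_spinlock() added ahead of rq->lock
 * keeps the ->state store ordered before the signal_pending check,
 * so at least one side must observe the other.
 */
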