author     Linus Torvalds <torvalds@linux-foundation.org>   2016-07-25 15:41:29 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2016-07-25 15:41:29 -0400
commit     c86ad14d305d2429c3da19462440bac50c183def
tree       bd794cd72476661faf82c440063c217bb978ce44 /kernel/sched
parent     a2303849a6b4b7ba59667091e00d6bb194071d9a
parent     f06628638cf6e75f179742b6c1b35076965b9fdd
Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking updates from Ingo Molnar:
"The locking tree was busier in this cycle than the usual pattern - a
couple of major projects happened to coincide.
The main changes are:
- implement the atomic_fetch_{add,sub,and,or,xor}() API natively
across all SMP architectures (Peter Zijlstra); see the sketch of the
fetch-op return convention after the quoted message
- add atomic_fetch_{inc/dec}() as well, using the generic primitives
(Davidlohr Bueso)
- optimize various aspects of rwsems (Jason Low, Davidlohr Bueso,
Waiman Long)
- optimize smp_cond_load_acquire() on arm64 and implement LSE based
atomic{,64}_fetch_{add,sub,and,andnot,or,xor}{,_relaxed,_acquire,_release}()
on arm64 (Will Deacon)
- introduce smp_acquire__after_ctrl_dep() and fix various barrier
mis-uses and bugs (Peter Zijlstra)
- after discovering ancient spin_unlock_wait() barrier bugs in its
implementation and usage, strengthen its semantics and update/fix
usage sites (Peter Zijlstra)
- optimize mutex_trylock() fastpath (Peter Zijlstra)
- ... misc fixes and cleanups"
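The atomic_fetch_*() operations named above return the value the variable held before the operation, in contrast to the existing atomic_*_return() ops, which return the new value; atomic_fetch_inc()/atomic_fetch_dec() are built on the same primitives. As a hedged illustration of that return convention only — plain userspace C11 atomics with default seq_cst ordering, not the kernel's atomic_t API or its _relaxed/_acquire/_release variants — a minimal sketch:

#include <stdatomic.h>
#include <stdio.h>

int main(void)
{
	atomic_int v = 5;

	/* fetch-op convention: return the previous value, then apply the op */
	int old  = atomic_fetch_add(&v, 3);	/* old == 5, v is now 8 */
	int prev = atomic_fetch_and(&v, 0x3);	/* prev == 8, v is now 0 */

	/* C11 has no fetch_inc(); fetch_add(&v, 1) plays that role here */
	int was  = atomic_fetch_add(&v, 1);	/* was == 0, v is now 1 */

	printf("old=%d prev=%d was=%d final=%d\n", old, prev, was, atomic_load(&v));
	return 0;
}

The kernel variants additionally come in _relaxed/_acquire/_release orderings; only the "return the old value" convention carries over from this sketch.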
* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (67 commits)
locking/atomic: Introduce inc/dec variants for the atomic_fetch_$op() API
locking/barriers, arch/arm64: Implement LDXR+WFE based smp_cond_load_acquire()
locking/static_keys: Fix non static symbol Sparse warning
locking/qspinlock: Use __this_cpu_dec() instead of full-blown this_cpu_dec()
locking/atomic, arch/tile: Fix tilepro build
locking/atomic, arch/m68k: Remove comment
locking/atomic, arch/arc: Fix build
locking/Documentation: Clarify limited control-dependency scope
locking/atomic, arch/rwsem: Employ atomic_long_fetch_add()
locking/atomic, arch/qrwlock: Employ atomic_fetch_add_acquire()
locking/atomic, arch/mips: Convert to _relaxed atomics
locking/atomic, arch/alpha: Convert to _relaxed atomics
locking/atomic: Remove the deprecated atomic_{set,clear}_mask() functions
locking/atomic: Remove linux/atomic.h:atomic_fetch_or()
locking/atomic: Implement atomic{,64,_long}_fetch_{add,sub,and,andnot,or,xor}{,_relaxed,_acquire,_release}()
locking/atomic: Fix atomic64_relaxed() bits
locking/atomic, arch/xtensa: Implement atomic_fetch_{add,sub,and,or,xor}()
locking/atomic, arch/x86: Implement atomic{,64}_fetch_{add,sub,and,or,xor}()
locking/atomic, arch/tile: Implement atomic{,64}_fetch_{add,sub,and,or,xor}()
locking/atomic, arch/sparc: Implement atomic{,64}_fetch_{add,sub,and,or,xor}()
...
Diffstat (limited to 'kernel/sched')
 -rw-r--r--  kernel/sched/core.c  | 8 ++++----
 -rw-r--r--  kernel/sched/sched.h | 2 +-
 2 files changed, 5 insertions, 5 deletions
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 97ee9ac7e97c..af0ef74df23c 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1937,7 +1937,7 @@ static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags)
  * chain to provide order. Instead we do:
  *
  *   1) smp_store_release(X->on_cpu, 0)
- *   2) smp_cond_acquire(!X->on_cpu)
+ *   2) smp_cond_load_acquire(!X->on_cpu)
  *
  * Example:
  *
@@ -1948,7 +1948,7 @@ static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags)
  *   sched-out X
  *   smp_store_release(X->on_cpu, 0);
  *
- *                    smp_cond_acquire(!X->on_cpu);
+ *                    smp_cond_load_acquire(&X->on_cpu, !VAL);
  *                    X->state = WAKING
  *                    set_task_cpu(X,2)
  *
@@ -1974,7 +1974,7 @@ static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags)
  * This means that any means of doing remote wakeups must order the CPU doing
  * the wakeup against the CPU the task is going to end up running on. This,
  * however, is already required for the regular Program-Order guarantee above,
- * since the waking CPU is the one issueing the ACQUIRE (smp_cond_acquire).
+ * since the waking CPU is the one issueing the ACQUIRE (smp_cond_load_acquire).
  *
  */
 
@@ -2047,7 +2047,7 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
 	 * This ensures that tasks getting woken will be fully ordered against
 	 * their previous state and preserve Program Order.
 	 */
-	smp_cond_acquire(!p->on_cpu);
+	smp_cond_load_acquire(&p->on_cpu, !VAL);
 
 	p->sched_contributes_to_load = !!task_contributes_to_load(p);
 	p->state = TASK_WAKING;
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 898c0d2f18fe..81283592942b 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1113,7 +1113,7 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
 	 * In particular, the load of prev->state in finish_task_switch() must
 	 * happen before this.
 	 *
-	 * Pairs with the smp_cond_acquire() in try_to_wake_up().
+	 * Pairs with the smp_cond_load_acquire() in try_to_wake_up().
 	 */
 	smp_store_release(&prev->on_cpu, 0);
 #endif
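The hunks above are the scheduler side of the smp_cond_acquire() -> smp_cond_load_acquire() conversion: finish_lock_switch() publishes the sched-out with smp_store_release(&prev->on_cpu, 0), and try_to_wake_up() spins with smp_cond_load_acquire(&p->on_cpu, !VAL), so everything after the wait is ordered against the releasing CPU. A minimal userspace C11 analogue of that handoff, as a hedged sketch only — the struct, field, and function names are illustrative, and the kernel primitives (smp_store_release(), smp_cond_load_acquire(), cpu_relax()) are only approximated with C11 atomics and pthreads:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

/* Illustrative stand-in for the task_struct fields involved. */
struct task {
	int payload;		/* plain data written before the release store */
	atomic_int on_cpu;
};

/* "Scheduling-out CPU": the release store publishes every prior write. */
static void *sched_out(void *arg)
{
	struct task *t = arg;

	t->payload = 42;
	atomic_store_explicit(&t->on_cpu, 0, memory_order_release);
	return NULL;
}

/* "Waking CPU": spin until on_cpu is clear; the acquire load pairs with
 * the release store, so the read below is guaranteed to see payload == 42. */
static void *wake(void *arg)
{
	struct task *t = arg;

	while (atomic_load_explicit(&t->on_cpu, memory_order_acquire))
		;	/* kernel code would cpu_relax() here */
	printf("payload=%d\n", t->payload);
	return NULL;
}

int main(void)
{
	struct task t = { .payload = 0, .on_cpu = 1 };
	pthread_t waker, scheduler;

	pthread_create(&waker, NULL, wake, &t);
	pthread_create(&scheduler, NULL, sched_out, &t);
	pthread_join(waker, NULL);
	pthread_join(scheduler, NULL);
	return 0;
}

Built with -pthread, the program always prints payload=42: the acquire load that observes on_cpu == 0 synchronizes with the release store, which is exactly the ordering the ttwu comment describes.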