author     Daniel Vetter <daniel.vetter@ffwll.ch>  2015-11-23 03:04:05 -0500
committer  Daniel Vetter <daniel.vetter@ffwll.ch>  2015-11-23 03:04:05 -0500
commit     92907cbbef8625bb3998d1eb385fc88f23c97a3f
tree       15626ff9287e37c3cb81c7286d6db5a7fd77c854 /include/asm-generic/mutex-dec.h
parent     15fbfccfe92c62ae8d1ecc647c44157ed01ac02e
parent     1ec218373b8ebda821aec00bb156a9c94fad9cd4
Merge tag 'v4.4-rc2' into drm-intel-next-queued
Linux 4.4-rc2
Backmerge to get at
commit 1b0e3a049efe471c399674fd954500ce97438d30
Author: Imre Deak <imre.deak@intel.com>
Date: Thu Nov 5 23:04:11 2015 +0200
drm/i915/skl: disable display side power well support for now
so that we can properly re-enable skl power wells in -next.
Conflicts are just adjacent lines changed, except for intel_fbdev.c
where we need to interleave the changes. Nothing nefarious.
Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
Diffstat (limited to 'include/asm-generic/mutex-dec.h')
-rw-r--r--  include/asm-generic/mutex-dec.h | 8
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/include/asm-generic/mutex-dec.h b/include/asm-generic/mutex-dec.h
index d4f9fb4e53df..fd694cfd678a 100644
--- a/include/asm-generic/mutex-dec.h
+++ b/include/asm-generic/mutex-dec.h
@@ -20,7 +20,7 @@
 static inline void
 __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
 {
-	if (unlikely(atomic_dec_return(count) < 0))
+	if (unlikely(atomic_dec_return_acquire(count) < 0))
 		fail_fn(count);
 }
 
@@ -35,7 +35,7 @@ __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
 static inline int
 __mutex_fastpath_lock_retval(atomic_t *count)
 {
-	if (unlikely(atomic_dec_return(count) < 0))
+	if (unlikely(atomic_dec_return_acquire(count) < 0))
 		return -1;
 	return 0;
 }
@@ -56,7 +56,7 @@ __mutex_fastpath_lock_retval(atomic_t *count)
 static inline void
 __mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
 {
-	if (unlikely(atomic_inc_return(count) <= 0))
+	if (unlikely(atomic_inc_return_release(count) <= 0))
 		fail_fn(count);
 }
 
@@ -80,7 +80,7 @@ __mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
 static inline int
 __mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
 {
-	if (likely(atomic_cmpxchg(count, 1, 0) == 1))
+	if (likely(atomic_cmpxchg_acquire(count, 1, 0) == 1))
 		return 1;
 	return 0;
 }
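
The hunks above appear to come in via the backmerged 4.4-rc2 side of the merge: the generic mutex fastpath drops its full-barrier atomics (atomic_dec_return(), atomic_inc_return(), atomic_cmpxchg()) in favour of the _acquire/_release variants, since taking a lock only needs acquire ordering and releasing it only needs release ordering. The sketch below is not kernel code; it is a minimal userspace analogue using C11 <stdatomic.h>, with a hypothetical toy_mutex type and a spin-wait standing in for the real slowpath, meant only to make the acquire/release pattern concrete.

/*
 * Toy userspace analogue of the count-based mutex fastpath in
 * include/asm-generic/mutex-dec.h, written with C11 atomics.
 * count == 1 means unlocked, count <= 0 means locked (possibly with
 * waiters).  The real kernel slowpath queues waiters via fail_fn();
 * here it is just a spin loop.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct toy_mutex {
	atomic_int count;	/* 1: unlocked, 0: locked, <0: locked with waiters */
};

static void toy_mutex_init(struct toy_mutex *m)
{
	atomic_init(&m->count, 1);
}

/* Lock fastpath: acquire ordering, analogous to atomic_dec_return_acquire(). */
static void toy_mutex_lock(struct toy_mutex *m)
{
	/* fetch_sub returns the old value, so old - 1 is the new count. */
	if (atomic_fetch_sub_explicit(&m->count, 1, memory_order_acquire) - 1 < 0) {
		/* Toy slowpath: spin until the lock is free again. */
		int expected;
		do {
			expected = 1;
		} while (!atomic_compare_exchange_weak_explicit(&m->count,
				&expected, 0,
				memory_order_acquire, memory_order_relaxed));
	}
}

/* Trylock fastpath: acquire ordering, analogous to atomic_cmpxchg_acquire(). */
static bool toy_mutex_trylock(struct toy_mutex *m)
{
	int expected = 1;

	return atomic_compare_exchange_strong_explicit(&m->count, &expected, 0,
			memory_order_acquire, memory_order_relaxed);
}

/* Unlock: release ordering, analogous to atomic_inc_return_release().
 * The toy simply resets the count; the kernel increments and invokes
 * fail_fn() when the result shows there are waiters. */
static void toy_mutex_unlock(struct toy_mutex *m)
{
	/* Publish the critical section before the lock becomes available. */
	atomic_store_explicit(&m->count, 1, memory_order_release);
}

int main(void)
{
	struct toy_mutex m;

	toy_mutex_init(&m);

	toy_mutex_lock(&m);
	printf("trylock while held: %d\n", toy_mutex_trylock(&m));	/* prints 0 */
	toy_mutex_unlock(&m);

	printf("trylock while free: %d\n", toy_mutex_trylock(&m));	/* prints 1 */
	toy_mutex_unlock(&m);

	return 0;
}

Built with cc -std=c11, this prints 0 for the trylock attempt while the lock is held and 1 once it has been released. The only point it illustrates is the one the diff makes: the lock-side operations need memory_order_acquire and the unlock side needs memory_order_release, mirroring the _acquire/_release substitutions above.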