diff options
author | Michal Marek <mmarek@suse.cz> | 2014-01-02 08:02:06 -0500 |
---|---|---|
committer | Michal Marek <mmarek@suse.cz> | 2014-01-02 08:02:06 -0500 |
commit | 37e2c2a775fc887acd1432908478dfd532f7f00f (patch) | |
tree | e51ebc699d8e262fd47e0913be6a711cb1a7b565 /lib/lockref.c | |
parent | 1c8ddae09f4c102b97c9086cc70347e89468a547 (diff) | |
parent | 6ce4eac1f600b34f2f7f58f9cd8f0503d79e42ae (diff) |
Merge commit v3.13-rc1 into kbuild/misc
Diffstat (limited to 'lib/lockref.c')
-rw-r--r-- | lib/lockref.c | 26 |
1 file changed, 22 insertions, 4 deletions
diff --git a/lib/lockref.c b/lib/lockref.c index e2cd2c0a8821..d2b123f8456b 100644 --- a/lib/lockref.c +++ b/lib/lockref.c | |||
@@ -1,7 +1,23 @@ | |||
1 | #include <linux/export.h> | 1 | #include <linux/export.h> |
2 | #include <linux/lockref.h> | 2 | #include <linux/lockref.h> |
3 | 3 | ||
4 | #ifdef CONFIG_CMPXCHG_LOCKREF | 4 | #if USE_CMPXCHG_LOCKREF |
5 | |||
6 | /* | ||
7 | * Allow weakly-ordered memory architectures to provide barrier-less | ||
8 | * cmpxchg semantics for lockref updates. | ||
9 | */ | ||
10 | #ifndef cmpxchg64_relaxed | ||
11 | # define cmpxchg64_relaxed cmpxchg64 | ||
12 | #endif | ||
13 | |||
14 | /* | ||
15 | * Allow architectures to override the default cpu_relax() within CMPXCHG_LOOP. | ||
16 | * This is useful for architectures with an expensive cpu_relax(). | ||
17 | */ | ||
18 | #ifndef arch_mutex_cpu_relax | ||
19 | # define arch_mutex_cpu_relax() cpu_relax() | ||
20 | #endif | ||
5 | 21 | ||
6 | /* | 22 | /* |
7 | * Note that the "cmpxchg()" reloads the "old" value for the | 23 | * Note that the "cmpxchg()" reloads the "old" value for the |
@@ -14,12 +30,13 @@ | |||
14 | while (likely(arch_spin_value_unlocked(old.lock.rlock.raw_lock))) { \ | 30 | while (likely(arch_spin_value_unlocked(old.lock.rlock.raw_lock))) { \ |
15 | struct lockref new = old, prev = old; \ | 31 | struct lockref new = old, prev = old; \ |
16 | CODE \ | 32 | CODE \ |
17 | old.lock_count = cmpxchg(&lockref->lock_count, \ | 33 | old.lock_count = cmpxchg64_relaxed(&lockref->lock_count, \ |
18 | old.lock_count, new.lock_count); \ | 34 | old.lock_count, \ |
35 | new.lock_count); \ | ||
19 | if (likely(old.lock_count == prev.lock_count)) { \ | 36 | if (likely(old.lock_count == prev.lock_count)) { \ |
20 | SUCCESS; \ | 37 | SUCCESS; \ |
21 | } \ | 38 | } \ |
22 | cpu_relax(); \ | 39 | arch_mutex_cpu_relax(); \ |
23 | } \ | 40 | } \ |
24 | } while (0) | 41 | } while (0) |
25 | 42 | ||
@@ -136,6 +153,7 @@ void lockref_mark_dead(struct lockref *lockref) | |||
136 | assert_spin_locked(&lockref->lock); | 153 | assert_spin_locked(&lockref->lock); |
137 | lockref->count = -128; | 154 | lockref->count = -128; |
138 | } | 155 | } |
156 | EXPORT_SYMBOL(lockref_mark_dead); | ||
139 | 157 | ||
140 | /** | 158 | /** |
141 | * lockref_get_not_dead - Increments count unless the ref is dead | 159 | * lockref_get_not_dead - Increments count unless the ref is dead |