diff options
-rw-r--r--  lib/lockref.c | 13 +++++++++++--
 1 file changed, 11 insertions(+), 2 deletions(-)

diff --git a/lib/lockref.c b/lib/lockref.c
index 677d036cf3c7..e294ae445c9a 100644
--- a/lib/lockref.c
+++ b/lib/lockref.c
@@ -4,6 +4,14 @@
 #ifdef CONFIG_CMPXCHG_LOCKREF
 
 /*
+ * Allow weakly-ordered memory architectures to provide barrier-less
+ * cmpxchg semantics for lockref updates.
+ */
+#ifndef cmpxchg64_relaxed
+# define cmpxchg64_relaxed cmpxchg64
+#endif
+
+/*
  * Note that the "cmpxchg()" reloads the "old" value for the
  * failure case.
  */
@@ -14,8 +22,9 @@
 	while (likely(arch_spin_value_unlocked(old.lock.rlock.raw_lock))) {	\
 		struct lockref new = old, prev = old;				\
 		CODE								\
-		old.lock_count = cmpxchg64(&lockref->lock_count,		\
-					   old.lock_count, new.lock_count);	\
+		old.lock_count = cmpxchg64_relaxed(&lockref->lock_count,	\
+						   old.lock_count,		\
+						   new.lock_count);		\
 		if (likely(old.lock_count == prev.lock_count)) {		\
 			SUCCESS;						\
 		}								\