diff options

Diffstat (limited to 'lib/lockref.c')

 lib/lockref.c | 23 +++++++++++++++++++---
 1 file changed, 20 insertions(+), 3 deletions(-)

diff --git a/lib/lockref.c b/lib/lockref.c
index e2cd2c0a8821..6f9d434c1521 100644
--- a/lib/lockref.c
+++ b/lib/lockref.c
@@ -4,6 +4,22 @@
 #ifdef CONFIG_CMPXCHG_LOCKREF
 
 /*
+ * Allow weakly-ordered memory architectures to provide barrier-less
+ * cmpxchg semantics for lockref updates.
+ */
+#ifndef cmpxchg64_relaxed
+# define cmpxchg64_relaxed cmpxchg64
+#endif
+
+/*
+ * Allow architectures to override the default cpu_relax() within CMPXCHG_LOOP.
+ * This is useful for architectures with an expensive cpu_relax().
+ */
+#ifndef arch_mutex_cpu_relax
+# define arch_mutex_cpu_relax() cpu_relax()
+#endif
+
+/*
  * Note that the "cmpxchg()" reloads the "old" value for the
  * failure case.
  */
@@ -14,12 +30,13 @@
 	while (likely(arch_spin_value_unlocked(old.lock.rlock.raw_lock))) {	\
 		struct lockref new = old, prev = old;				\
 		CODE								\
-		old.lock_count = cmpxchg(&lockref->lock_count,			\
-					 old.lock_count, new.lock_count);	\
+		old.lock_count = cmpxchg64_relaxed(&lockref->lock_count,	\
+						   old.lock_count,		\
+						   new.lock_count);		\
 		if (likely(old.lock_count == prev.lock_count)) {		\
 			SUCCESS;						\
 		}								\
-		cpu_relax();							\
+		arch_mutex_cpu_relax();						\
 	}									\
 } while (0)
 