 arch/x86/Kconfig                |  1 +
 arch/x86/include/asm/spinlock.h |  5 +++++
 include/linux/lockref.h         | 11 +++++++++--
 lib/Kconfig                     | 10 ++++++++++
 lib/lockref.c                   | 60 ++++++++++++++++++++++++++++++++++++++-
 5 files changed, 84 insertions(+), 3 deletions(-)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index b32ebf92b0ce..67e00740531c 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -16,6 +16,7 @@ config X86_64
 	def_bool y
 	depends on 64BIT
 	select X86_DEV_DMA_OPS
+	select ARCH_USE_CMPXCHG_LOCKREF
 
 ### Arch settings
 config X86
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
index e3ddd7db723f..e0e668422c75 100644
--- a/arch/x86/include/asm/spinlock.h
+++ b/arch/x86/include/asm/spinlock.h
@@ -34,6 +34,11 @@
 # define UNLOCK_LOCK_PREFIX
 #endif
 
+static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
+{
+	return lock.tickets.head == lock.tickets.tail;
+}
+
 /*
  * Ticket locks are conceptually two parts, one indicating the current head of
  * the queue, and the other indicating the current tail. The lock is acquired
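
arch_spin_value_unlocked() tests a lock *value* rather than a lock in memory: the lockref fast path added below snapshots the whole lock+count word and needs to ask whether the spinlock was free in that snapshot, without ever dereferencing the live lock. A minimal user-space sketch of the same check, assuming a simplified two-halfword ticket-lock layout (the names here are illustrative, not the kernel's):

    #include <stdint.h>

    /* Simplified stand-in for x86's ticket-based arch_spinlock_t. */
    struct ticket_lock {
            uint16_t head;  /* ticket currently being served */
            uint16_t tail;  /* next ticket to be handed out */
    };

    /* Unlocked iff no tickets are outstanding: head has caught up
     * with tail. Passing the lock BY VALUE is the point: this
     * inspects a snapshot, never the live lock word. */
    static inline int ticket_value_unlocked(struct ticket_lock lock)
    {
            return lock.head == lock.tail;
    }
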
diff --git a/include/linux/lockref.h b/include/linux/lockref.h
index 4c0af31c8d47..ca07b5028b01 100644
--- a/include/linux/lockref.h
+++ b/include/linux/lockref.h
@@ -17,8 +17,15 @@
 #include <linux/spinlock.h>
 
 struct lockref {
-	spinlock_t lock;
-	unsigned int count;
+	union {
+#ifdef CONFIG_CMPXCHG_LOCKREF
+		aligned_u64 lock_count;
+#endif
+		struct {
+			spinlock_t lock;
+			unsigned int count;
+		};
+	};
 };
 
 extern void lockref_get(struct lockref *);
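
The union overlays one 64-bit word (lock_count) on the {spinlock, count} pair, which is what lets lib/lockref.c update both with a single cmpxchg(). A user-space sketch of the aliasing, assuming a 4-byte lock word (mock_lockref and its members are hypothetical; the kernel enforces the 8-byte total with a BUILD_BUG_ON):

    #include <stdint.h>
    #include <assert.h>

    /* User-space model of the new struct lockref layout. */
    struct mock_lockref {
            union {
                    uint64_t lock_count;    /* whole pair, for cmpxchg */
                    struct {
                            uint32_t lock;  /* stands in for spinlock_t */
                            uint32_t count; /* the reference count */
                    };
            };
    };

    int main(void)
    {
            struct mock_lockref ref = { .lock = 0, .count = 5 };

            /* Both views alias the same 8 bytes, so one atomic
             * compare-and-swap of lock_count covers lock and count. */
            _Static_assert(sizeof(struct mock_lockref) == 8,
                           "must fit one cmpxchg word");
            assert(ref.lock_count == ((uint64_t)5 << 32) ||
                   ref.lock_count == 5);    /* depends on endianness */
            return 0;
    }
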
diff --git a/lib/Kconfig b/lib/Kconfig
index 71d9f81f6eed..65561716c16c 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -48,6 +48,16 @@ config STMP_DEVICE
 config PERCPU_RWSEM
 	boolean
 
+config ARCH_USE_CMPXCHG_LOCKREF
+	bool
+
+config CMPXCHG_LOCKREF
+	def_bool y if ARCH_USE_CMPXCHG_LOCKREF
+	depends on SMP
+	depends on !GENERIC_LOCKBREAK
+	depends on !DEBUG_SPINLOCK
+	depends on !DEBUG_LOCK_ALLOC
+
 config CRC_CCITT
 	tristate "CRC-CCITT functions"
 	help
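
ARCH_USE_CMPXCHG_LOCKREF is the opt-in flag an architecture selects (as x86-64 does above); CMPXCHG_LOCKREF then resolves to y only when none of the debug options are active. The debug exclusions are not incidental: the scheme requires spinlock_t plus the 32-bit count to pack into one 8-byte word, and spinlock debugging / lockdep embed extra bookkeeping in every lock that makes that impossible. A hedged user-space restatement of the size constraint (both struct layouts here are illustrative):

    #include <stdint.h>

    struct plain_lock { uint32_t val; };                /* 4 bytes */
    struct debug_lock { uint32_t val; void *owner; };   /* bigger  */

    /* A plain lock plus the count fits one cmpxchg-able word... */
    _Static_assert(sizeof(struct plain_lock) + sizeof(uint32_t) == 8,
                   "lock + count must pack into 8 bytes");
    /* ...a debug lock cannot, hence the Kconfig dependencies above. */
    _Static_assert(sizeof(struct debug_lock) + sizeof(uint32_t) > 8,
                   "debug locks break the single-word layout");
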
diff --git a/lib/lockref.c b/lib/lockref.c
index a9a4f4e1eff5..7819c2d1d315 100644
--- a/lib/lockref.c
+++ b/lib/lockref.c
@@ -1,6 +1,33 @@
 #include <linux/export.h>
 #include <linux/lockref.h>
 
+#ifdef CONFIG_CMPXCHG_LOCKREF
+
+/*
+ * Note that the "cmpxchg()" reloads the "old" value for the
+ * failure case.
+ */
+#define CMPXCHG_LOOP(CODE, SUCCESS) do {					\
+	struct lockref old;							\
+	BUILD_BUG_ON(sizeof(old) != 8);						\
+	old.lock_count = ACCESS_ONCE(lockref->lock_count);			\
+	while (likely(arch_spin_value_unlocked(old.lock.rlock.raw_lock))) {	\
+		struct lockref new = old, prev = old;				\
+		CODE								\
+		old.lock_count = cmpxchg(&lockref->lock_count,			\
+					 old.lock_count, new.lock_count);	\
+		if (likely(old.lock_count == prev.lock_count)) {		\
+			SUCCESS;						\
+		}								\
+	}									\
+} while (0)
+
+#else
+
+#define CMPXCHG_LOOP(CODE, SUCCESS) do { } while (0)
+
+#endif
+
 /**
  * lockref_get - Increments reference count unconditionally
  * @lockcnt: pointer to lockref structure
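
For reference, here is roughly what CMPXCHG_LOOP(new.count++;, return;) expands to inside lockref_get() below. This is a hand expansion for readability, not compiler output:

    struct lockref old;
    BUILD_BUG_ON(sizeof(old) != 8);
    old.lock_count = ACCESS_ONCE(lockref->lock_count);  /* snapshot */
    while (likely(arch_spin_value_unlocked(old.lock.rlock.raw_lock))) {
            struct lockref new = old, prev = old;
            new.count++;                                /* CODE */
            /* Publish lock+count in one shot; on failure cmpxchg
             * hands back the current value, so the loop retries
             * from that instead of re-reading. */
            old.lock_count = cmpxchg(&lockref->lock_count,
                                     old.lock_count, new.lock_count);
            if (likely(old.lock_count == prev.lock_count)) {
                    return;                             /* SUCCESS */
            }
    }
    /* The lock was (or became) held: fall through to the
     * spin_lock() slow path. */
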
@@ -10,6 +37,12 @@
  */
 void lockref_get(struct lockref *lockref)
 {
+	CMPXCHG_LOOP(
+		new.count++;
+	,
+		return;
+	);
+
 	spin_lock(&lockref->lock);
 	lockref->count++;
 	spin_unlock(&lockref->lock);
@@ -23,9 +56,18 @@ EXPORT_SYMBOL(lockref_get);
  */
 int lockref_get_not_zero(struct lockref *lockref)
 {
-	int retval = 0;
+	int retval;
+
+	CMPXCHG_LOOP(
+		new.count++;
+		if (!old.count)
+			return 0;
+	,
+		return 1;
+	);
 
 	spin_lock(&lockref->lock);
+	retval = 0;
 	if (lockref->count) {
 		lockref->count++;
 		retval = 1;
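
Semantics of lockref_get_not_zero(): it returns 1 and takes a reference only if the count was non-zero, and 0 if the object had already dropped to zero. A hypothetical caller pattern (struct my_entry and entry_tryget are illustrative, not part of this patch):

    #include <linux/lockref.h>

    /* Hypothetical object guarded by a lockref. */
    struct my_entry {
            struct lockref ref;
            /* ... payload ... */
    };

    /* Take a reference during a lockless lookup, refusing objects
     * whose count already hit zero (teardown in progress). */
    static struct my_entry *entry_tryget(struct my_entry *e)
    {
            if (!lockref_get_not_zero(&e->ref))
                    return NULL;    /* dead: count was already zero */
            return e;               /* got a reference; caller must put */
    }
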
@@ -43,6 +85,14 @@ EXPORT_SYMBOL(lockref_get_not_zero);
  */
 int lockref_get_or_lock(struct lockref *lockref)
 {
+	CMPXCHG_LOOP(
+		new.count++;
+		if (!old.count)
+			break;
+	,
+		return 1;
+	);
+
 	spin_lock(&lockref->lock);
 	if (!lockref->count)
 		return 0;
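
lockref_get_or_lock() either takes a reference (returning 1) or, when the count was zero, returns 0 with the spinlock held so the caller can decide the object's fate under the lock. A hypothetical usage sketch, reusing struct my_entry from the example above (revive_entry() is a stand-in):

    static void revive_entry(struct my_entry *e);   /* hypothetical */

    static void entry_get_or_revive(struct my_entry *e)
    {
            if (lockref_get_or_lock(&e->ref))
                    return;                 /* fast path: ref taken */
            /* Count was zero and e->ref.lock is now held. */
            revive_entry(e);                /* resurrect under the lock */
            e->ref.count = 1;
            spin_unlock(&e->ref.lock);
    }
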
@@ -59,6 +109,14 @@ EXPORT_SYMBOL(lockref_get_or_lock);
  */
 int lockref_put_or_lock(struct lockref *lockref)
 {
+	CMPXCHG_LOOP(
+		new.count--;
+		if (old.count <= 1)
+			break;
+	,
+		return 1;
+	);
+
 	spin_lock(&lockref->lock);
 	if (lockref->count <= 1)
 		return 0;
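
lockref_put_or_lock() is the mirror image on the put side: it drops a reference and returns 1 while the count stays above 1, but returns 0 with the lock held and the count untouched when this put might be the last. A hypothetical dput()-style caller, again with the illustrative struct my_entry (free_entry() is a stand-in for real teardown):

    static void free_entry(struct my_entry *e);     /* hypothetical */

    static void entry_put(struct my_entry *e)
    {
            if (lockref_put_or_lock(&e->ref))
                    return;                 /* fast path: count was > 1 */
            /* Count was <= 1 and e->ref.lock is held: tear down. */
            e->ref.count = 0;
            spin_unlock(&e->ref.lock);
            free_entry(e);
    }
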