Diffstat (limited to 'include/linux/mcs_spinlock.h')
-rw-r--r--	include/linux/mcs_spinlock.h	77
1 file changed, 77 insertions, 0 deletions
diff --git a/include/linux/mcs_spinlock.h b/include/linux/mcs_spinlock.h
new file mode 100644
index 000000000000..9578ef81940b
--- /dev/null
+++ b/include/linux/mcs_spinlock.h
@@ -0,0 +1,77 @@
/*
 * MCS lock defines
 *
 * This file contains the main data structure and API definitions of MCS lock.
 *
 * The MCS lock (proposed by Mellor-Crummey and Scott) is a simple spin-lock
 * with the desirable properties of being fair, and with each cpu trying
 * to acquire the lock spinning on a local variable.
 * It avoids the expensive cache bouncing that common test-and-set spin-lock
 * implementations incur.
 */
#ifndef __LINUX_MCS_SPINLOCK_H
#define __LINUX_MCS_SPINLOCK_H

struct mcs_spinlock {
	struct mcs_spinlock *next;
	int locked; /* 1 if lock acquired */
};

/*
 * Note: on many architectures (x86 being an exception), the
 * smp_load_acquire()/smp_store_release() pair used by mcs_spin_unlock()
 * and mcs_spin_lock() is not sufficient to form a full memory barrier
 * across CPUs.  Callers that need a full barrier across an
 * unlock/lock pair should use smp_mb__after_unlock_lock() after
 * mcs_spin_lock().
 */
static inline
void mcs_spin_lock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
{
	struct mcs_spinlock *prev;

	/* Init node */
	node->locked = 0;
	node->next   = NULL;

	prev = xchg(lock, node);
	if (likely(prev == NULL)) {
		/* Lock acquired */
		node->locked = 1;
		return;
	}
	ACCESS_ONCE(prev->next) = node;
	/*
	 * Wait until the lock holder passes the lock down.
	 * Using smp_load_acquire() provides a memory barrier that
	 * ensures subsequent operations happen after the lock is acquired.
	 */
	while (!(smp_load_acquire(&node->locked)))
		arch_mutex_cpu_relax();
}

static inline
void mcs_spin_unlock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
{
	struct mcs_spinlock *next = ACCESS_ONCE(node->next);

	if (likely(!next)) {
		/*
		 * Release the lock by setting it to NULL
		 */
		if (cmpxchg(lock, node, NULL) == node)
			return;
		/* Wait until the next pointer is set */
		while (!(next = ACCESS_ONCE(node->next)))
			arch_mutex_cpu_relax();
	}
	/*
	 * Pass lock to next waiter.
	 * smp_store_release() provides a memory barrier to ensure that
	 * all operations in the critical section have completed
	 * before unlocking.
	 */
	smp_store_release(&next->locked, 1);
}

#endif /* __LINUX_MCS_SPINLOCK_H */
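
For readers unfamiliar with the API above, here is a minimal usage sketch. It is not part of the patch; the lock variable example_lock, the counter example_count, and the function example_increment() are hypothetical names used only for illustration. Each contender supplies its own struct mcs_spinlock node (typically on its stack) and passes the same node to the matching unlock.

/*
 * Minimal usage sketch (illustrative, not part of the patch).
 * The lock itself is just a pointer to the tail of the waiter queue;
 * NULL means the lock is free.
 */
#include <linux/mcs_spinlock.h>

static struct mcs_spinlock *example_lock;	/* NULL when the lock is free */
static unsigned long example_count;

static void example_increment(void)
{
	struct mcs_spinlock node;		/* this contender's queue node */

	mcs_spin_lock(&example_lock, &node);
	example_count++;			/* critical section */
	mcs_spin_unlock(&example_lock, &node);
}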
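
The full-barrier note in the header comment can be illustrated the same way. This too is an assumption-laden sketch rather than part of the patch: it only shows where smp_mb__after_unlock_lock() would be placed when an mcs_spin_unlock()/mcs_spin_lock() pair on different CPUs must together act as a full memory barrier.

/*
 * Sketch of the full-barrier case from the header comment
 * (illustrative, reusing the hypothetical example_lock/example_count).
 */
static void example_increment_full_barrier(void)
{
	struct mcs_spinlock node;

	mcs_spin_lock(&example_lock, &node);
	smp_mb__after_unlock_lock();	/* upgrade unlock+lock to a full barrier */
	example_count++;
	mcs_spin_unlock(&example_lock, &node);
}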