Diffstat (limited to 'include/linux/queuelock.h')
-rw-r--r--  include/linux/queuelock.h | 120
1 file changed, 88 insertions, 32 deletions
diff --git a/include/linux/queuelock.h b/include/linux/queuelock.h
index 044422b856..aaefdcaa4c 100644
--- a/include/linux/queuelock.h
+++ b/include/linux/queuelock.h
@@ -1,32 +1,88 @@
-#ifndef _UNC_QUEUE_LOCK_H_
-#define _UNC_QUEUE_LOCK_H_
-/**
- * Queue lock
- *
- *
- * CLEANUP: Q-Locks to be re-implemented properly for the new LITMUS.
- * Until then, we just use spin locks to make the code compile.
- */
-
-#include <linux/spinlock.h>
-
-typedef spinlock_t queuelock_t;
-
-
-
-/* The new linux-like queue lock API.
-
-int init_queuelock(queuelock_t* lock, int num_procs);
-void queue_lock_wipe(queuelock_t * lock);
-void queue_lock(queuelock_t *);
-void queue_unlock(queuelock_t *);
-
-*/
-
-
-#define queue_lock_init(lock) spin_lock_init(lock)
-#define queue_lock_wipe(lock) spin_lock_init(lock)
-#define queue_lock(lock) spin_lock(lock)
-#define queue_unlock(lock) spin_unlock(lock)
-
-#endif /* _UNC_QUEUE_LOCK_H_ */
+#ifndef _UNC_QUEUELOCK_H_
+#define _UNC_QUEUELOCK_H_
+/**
+ * Queue lock
+ *
+ * This is an implementation of T. Anderson's queue lock.
+ * It strives to follow the normal Linux locking conventions
+ * as much as possible. The rules for acquiring a lock are:
+ *
+ * 1) The caller must ensure interrupts and preemption are disabled.
+ *
+ * 2) The caller _cannot_ recursively acquire the lock.
+ *
+ * 3) The caller may not sleep while holding the lock. This is currently
+ *    not enforced, but it will not work.
+ */
+
+#include <linux/spinlock.h>
+#include <linux/cache.h>
+#include <asm/atomic.h>
+#include <linux/cpumask.h>
+#include <linux/smp.h>
+
+typedef struct {
+        /* Pad the values being spun on to make sure
+         * that they are cache local.
+         */
+        union {
+                enum {
+                        MUST_WAIT,
+                        HAS_LOCK
+                } val;
+                char padding[SMP_CACHE_BYTES];
+        } slots[NR_CPUS];
+
+        /* Since spin_slot is not being spun on, it can live
+         * in a shared cache line. next_slot will be evicted
+         * anyway on every attempt to acquire the lock.
+         */
+        int spin_slot[NR_CPUS];
+
+        /* The next slot that will be available. */
+        atomic_t next_slot;
+} queuelock_t;
+
+
+static inline void queue_lock_init(queuelock_t *lock)
+{
+        int i;
+        for_each_possible_cpu(i) {
+                lock->slots[i].val = MUST_WAIT;
+                lock->spin_slot[i] = i;
+        }
+        /* next_slot starts at 0, so slot 0 must initially hold the lock */
+        lock->slots[0].val = HAS_LOCK;
+        atomic_set(&lock->next_slot, 0);
+}
+
+
+static inline void queue_lock(queuelock_t *lock)
+{
+        int me = smp_processor_id();
+        /* Get the slot to spin on. atomic_inc_return() returns the
+         * incremented value, so take one off again.
+         */
+        lock->spin_slot[me] = atomic_inc_return(&lock->next_slot) - 1;
+        /* Check for wrap-around. This could probably be optimized
+         * away if we ensured that NR_CPUS divides INT_MAX...
+         */
+        if (unlikely(lock->spin_slot[me] == NR_CPUS - 1))
+                atomic_add(-NR_CPUS, &lock->next_slot);
+        /* range limit */
+        lock->spin_slot[me] %= NR_CPUS;
+        /* Spin until we acquire the lock. cpu_relax() also keeps the
+         * compiler from hoisting the load out of the loop.
+         */
+        while (lock->slots[lock->spin_slot[me]].val == MUST_WAIT)
+                cpu_relax();
+        /* re-arm our slot so the next lap of the counter must wait */
+        lock->slots[lock->spin_slot[me]].val = MUST_WAIT;
+}
+
+
+static inline void queue_unlock(queuelock_t *lock)
+{
+        int me = smp_processor_id();
+        /* hand the lock to the next slot in the ring */
+        lock->slots[(lock->spin_slot[me] + 1) % NR_CPUS].val = HAS_LOCK;
+}
+
+#endif /* _UNC_QUEUELOCK_H_ */
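
For context, here is how a caller would satisfy the three rules stated in the header comment. This is a minimal sketch, not part of the patch: my_lock and critical_section() are hypothetical names, and local_irq_save()/preempt_disable() are the usual kernel primitives for rule 1.

#include <linux/irqflags.h>
#include <linux/preempt.h>
#include <linux/queuelock.h>

static queuelock_t my_lock;     /* hypothetical; set up once via queue_lock_init() */

static void critical_section(void)
{
        unsigned long flags;

        local_irq_save(flags);          /* rule 1: interrupts off */
        preempt_disable();              /* rule 1: preemption off */
        queue_lock(&my_lock);

        /* ... work that neither sleeps (rule 3) nor takes
         * my_lock again (rule 2) ... */

        queue_unlock(&my_lock);
        preempt_enable();
        local_irq_restore(flags);
}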
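The hand-off itself is a ring of per-CPU flags: with NR_CPUS = 4, the first locker draws ticket 0, finds slots[0] set to HAS_LOCK, re-arms it to MUST_WAIT, and on unlock grants slots[1]; the waiter holding ticket 1 then proceeds, so the lock is FIFO by ticket. The same algorithm is easier to experiment with in user space. Below is a stand-alone sketch using C11 atomics instead of the kernel's atomic_t; all names are illustrative. Rather than subtracting NR_CPUS at the wrap point as the patch does, it keeps NSLOTS a power of two so the ticket counter stays consistent modulo NSLOTS even when the unsigned counter overflows, which is the divisibility trick the patch's wrap-around comment alludes to.

/* User-space sketch of an array-based (Anderson-style) queue lock.
 * Illustrative only. */
#include <stdatomic.h>
#include <stdbool.h>

#define NSLOTS 4        /* stands in for NR_CPUS; must be a power of two so
                         * slot numbers stay consistent across counter wrap */

struct aqlock {
        /* one flag per slot; a serious version would pad each flag to a
         * cache line, as the patch does with SMP_CACHE_BYTES */
        atomic_bool has_lock[NSLOTS];
        atomic_uint next_ticket;
};

static void aqlock_init(struct aqlock *l)
{
        for (unsigned i = 0; i < NSLOTS; i++)
                atomic_init(&l->has_lock[i], false);
        atomic_store(&l->has_lock[0], true);    /* ticket 0 goes first */
        atomic_init(&l->next_ticket, 0);
}

/* Returns the slot index; the caller hands it back to aqlock_unlock(). */
static unsigned aqlock_lock(struct aqlock *l)
{
        /* draw a ticket; modulo maps it onto the flag ring */
        unsigned slot = atomic_fetch_add(&l->next_ticket, 1) % NSLOTS;

        /* spin until the previous holder grants our slot; the acquire
         * load orders our critical section after its hand-off */
        while (!atomic_load_explicit(&l->has_lock[slot], memory_order_acquire))
                ;
        /* re-arm the slot for the next lap of the ticket counter */
        atomic_store_explicit(&l->has_lock[slot], false, memory_order_relaxed);
        return slot;
}

static void aqlock_unlock(struct aqlock *l, unsigned slot)
{
        /* grant the lock to the next slot in the ring */
        atomic_store_explicit(&l->has_lock[(slot + 1) % NSLOTS], true,
                              memory_order_release);
}

Where the patch records each CPU's slot in spin_slot[], the sketch simply returns the ticket to the caller. As in the patch, this is only correct if at most NSLOTS threads contend at once; each waiter then spins on a distinct flag, which is the point of the algorithm: the hand-off store invalidates one spinner's cache line rather than all of them, unlike a test-and-set spinlock.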