-rw-r--r--  include/litmus/rspinlock.h | 173
1 file changed, 173 insertions(+), 0 deletions(-)
diff --git a/include/litmus/rspinlock.h b/include/litmus/rspinlock.h
new file mode 100644
index 000000000000..d04c4033834c
--- /dev/null
+++ b/include/litmus/rspinlock.h
@@ -0,0 +1,173 @@
#ifndef LITMUS_RSPINLOCK_H
#define LITMUS_RSPINLOCK_H

#include <linux/spinlock.h>

/* recursive raw spinlock implementation */
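/*
 * A CPU that already holds the lock may take it again without
 * deadlocking: nested acquisitions only increment rcount, and the
 * base lock is released once the outermost unlock call brings the
 * nesting depth back to zero.
 */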

typedef struct raw_rspinlock {
	raw_spinlock_t baselock;
	/* number of times the lock is held recursively */
	int rcount;
	/* CPU that holds the lock, or NO_CPU when unowned */
	atomic_t owner;
} raw_rspinlock_t;

/* initializers */

#define raw_rspin_lock_init(lock) \
	do { \
		raw_spin_lock_init(&(lock)->baselock); \
		(lock)->rcount = 0; \
		atomic_set(&(lock)->owner, NO_CPU); \
	} while (0)


#define __RAW_RSPIN_LOCK_INITIALIZER(lockname) \
	{ \
		.baselock = __RAW_SPIN_LOCK_INITIALIZER(lockname), \
		.rcount = 0, \
		.owner = ATOMIC_INIT(NO_CPU), \
	}

#define __RAW_RSPIN_LOCK_UNLOCKED(lockname) \
	(raw_rspinlock_t) __RAW_RSPIN_LOCK_INITIALIZER(lockname)

/* for static initialization */
#define DEFINE_RAW_RSPINLOCK(x) raw_rspinlock_t x = __RAW_RSPIN_LOCK_UNLOCKED(x)

/* lock calls */

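/*
 * The recursion fast path is taken only when interrupts are already
 * disabled and this CPU is the owner, i.e. when the caller is nested
 * inside an outer _irqsave section on the same CPU; any other caller
 * falls through to a regular acquisition of the base lock.
 */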
#define raw_rspin_lock_irqsave(lock, flags) \
	do { \
		if (unlikely(irqs_disabled() && \
		    atomic_read(&(lock)->owner) == smp_processor_id())) { \
			/* IRQs already off: only initializes flags and \
			   silences uninitialized-variable warnings */ \
			local_irq_save(flags); \
			++(lock)->rcount; \
		} else { \
			raw_spin_lock_irqsave(&(lock)->baselock, flags); \
			atomic_set(&(lock)->owner, smp_processor_id()); \
		} \
	} while (0)

#define raw_rspin_lock(lock) \
	do { \
		if (unlikely(atomic_read(&(lock)->owner) == smp_processor_id())) { \
			++(lock)->rcount; \
		} else { \
			raw_spin_lock(&(lock)->baselock); \
			atomic_set(&(lock)->owner, smp_processor_id()); \
		} \
	} while (0)


/* unlock calls */

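/*
 * Note the ordering on the final-release path: owner is reset to
 * NO_CPU before the base lock is dropped. Doing it afterwards could
 * overwrite the owner field already set by the next lock holder.
 */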
#define raw_rspin_unlock_irqrestore(lock, flags) \
	do { \
		if (unlikely((lock)->rcount > 0)) { \
			--(lock)->rcount; \
			/* no-op: flags was saved with IRQs already disabled */ \
			local_irq_restore(flags); \
		} else { \
			atomic_set(&(lock)->owner, NO_CPU); \
			raw_spin_unlock_irqrestore(&(lock)->baselock, flags); \
		} \
	} while (0)

#define raw_rspin_unlock(lock) \
	do { \
		if (unlikely((lock)->rcount > 0)) { \
			--(lock)->rcount; \
		} else { \
			atomic_set(&(lock)->owner, NO_CPU); \
			raw_spin_unlock(&(lock)->baselock); \
		} \
	} while (0)
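
/*
 * Example (hypothetical caller, not part of the original patch):
 *
 *	static DEFINE_RAW_RSPINLOCK(state_lock);
 *
 *	void outer(void)
 *	{
 *		unsigned long flags;
 *
 *		raw_rspin_lock_irqsave(&state_lock, flags);
 *		inner();	// may take state_lock again
 *		raw_rspin_unlock_irqrestore(&state_lock, flags);
 *	}
 *
 *	void inner(void)
 *	{
 *		unsigned long flags;
 *
 *		// nested on the same CPU with IRQs off: only rcount is
 *		// bumped, and the paired unlock just drops it again
 *		raw_rspin_lock_irqsave(&state_lock, flags);
 *		...
 *		raw_rspin_unlock_irqrestore(&state_lock, flags);
 *	}
 */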


/* recursive spinlock implementation */
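/*
 * Same structure as the raw variant above, built on spinlock_t
 * instead of raw_spinlock_t; see the comments there for details.
 */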

typedef struct rspinlock {
	spinlock_t baselock;
	/* number of times the lock is held recursively */
	int rcount;
	/* CPU that holds the lock, or NO_CPU when unowned */
	atomic_t owner;
} rspinlock_t;

/* initializers */

#define rspin_lock_init(lock) \
	do { \
		spin_lock_init(&(lock)->baselock); \
		(lock)->rcount = 0; \
		atomic_set(&(lock)->owner, NO_CPU); \
	} while (0)


#define __RSPIN_LOCK_INITIALIZER(lockname) \
	{ \
		.baselock = __SPIN_LOCK_INITIALIZER(lockname), \
		.rcount = 0, \
		.owner = ATOMIC_INIT(NO_CPU), \
	}

#define __RSPIN_LOCK_UNLOCKED(lockname) \
	(rspinlock_t) __RSPIN_LOCK_INITIALIZER(lockname)

/* for static initialization */
#define DEFINE_RSPINLOCK(x) rspinlock_t x = __RSPIN_LOCK_UNLOCKED(x)


/* lock calls */

#define rspin_lock_irqsave(lock, flags) \
	do { \
		if (unlikely(irqs_disabled() && \
		    atomic_read(&(lock)->owner) == smp_processor_id())) { \
			/* IRQs already off: only initializes flags and \
			   silences uninitialized-variable warnings */ \
			local_irq_save(flags); \
			++(lock)->rcount; \
		} else { \
			spin_lock_irqsave(&(lock)->baselock, flags); \
			atomic_set(&(lock)->owner, smp_processor_id()); \
		} \
	} while (0)

#define rspin_lock(lock) \
	do { \
		if (unlikely(atomic_read(&(lock)->owner) == smp_processor_id())) { \
			++(lock)->rcount; \
		} else { \
			spin_lock(&(lock)->baselock); \
			atomic_set(&(lock)->owner, smp_processor_id()); \
		} \
	} while (0)


/* unlock calls */

#define rspin_unlock_irqrestore(lock, flags) \
	do { \
		if (unlikely((lock)->rcount > 0)) { \
			--(lock)->rcount; \
			/* no-op: flags was saved with IRQs already disabled */ \
			local_irq_restore(flags); \
		} else { \
			atomic_set(&(lock)->owner, NO_CPU); \
			spin_unlock_irqrestore(&(lock)->baselock, flags); \
		} \
	} while (0)

#define rspin_unlock(lock) \
	do { \
		if (unlikely((lock)->rcount > 0)) { \
			--(lock)->rcount; \
		} else { \
			atomic_set(&(lock)->owner, NO_CPU); \
			spin_unlock(&(lock)->baselock); \
		} \
	} while (0)

#endif /* LITMUS_RSPINLOCK_H */