Diffstat (limited to 'fs/gfs2/locking/dlm/lock.c')
-rw-r--r-- | fs/gfs2/locking/dlm/lock.c | 353 |
1 file changed, 287 insertions, 66 deletions
diff --git a/fs/gfs2/locking/dlm/lock.c b/fs/gfs2/locking/dlm/lock.c
index fed9a67be0f1..871ffc9578f2 100644
--- a/fs/gfs2/locking/dlm/lock.c
+++ b/fs/gfs2/locking/dlm/lock.c
@@ -11,46 +11,63 @@ | |||
11 | 11 | ||
12 | static char junk_lvb[GDLM_LVB_SIZE]; | 12 | static char junk_lvb[GDLM_LVB_SIZE]; |
13 | 13 | ||
14 | static void queue_complete(struct gdlm_lock *lp) | 14 | |
15 | /* convert dlm lock-mode to gfs lock-state */ | ||
16 | |||
17 | static s16 gdlm_make_lmstate(s16 dlmmode) | ||
15 | { | 18 | { |
16 | struct gdlm_ls *ls = lp->ls; | 19 | switch (dlmmode) { |
20 | case DLM_LOCK_IV: | ||
21 | case DLM_LOCK_NL: | ||
22 | return LM_ST_UNLOCKED; | ||
23 | case DLM_LOCK_EX: | ||
24 | return LM_ST_EXCLUSIVE; | ||
25 | case DLM_LOCK_CW: | ||
26 | return LM_ST_DEFERRED; | ||
27 | case DLM_LOCK_PR: | ||
28 | return LM_ST_SHARED; | ||
29 | } | ||
30 | gdlm_assert(0, "unknown DLM mode %d", dlmmode); | ||
31 | return -1; | ||
32 | } | ||
17 | 33 | ||
18 | clear_bit(LFL_ACTIVE, &lp->flags); | 34 | /* A lock placed on this queue is re-submitted to DLM as soon as the lock_dlm |
35 | thread gets to it. */ | ||
36 | |||
37 | static void queue_submit(struct gdlm_lock *lp) | ||
38 | { | ||
39 | struct gdlm_ls *ls = lp->ls; | ||
19 | 40 | ||
20 | spin_lock(&ls->async_lock); | 41 | spin_lock(&ls->async_lock); |
21 | list_add_tail(&lp->clist, &ls->complete); | 42 | list_add_tail(&lp->delay_list, &ls->submit); |
22 | spin_unlock(&ls->async_lock); | 43 | spin_unlock(&ls->async_lock); |
23 | wake_up(&ls->thread_wait); | 44 | wake_up(&ls->thread_wait); |
24 | } | 45 | } |
25 | 46 | ||
26 | static inline void gdlm_ast(void *astarg) | 47 | static void wake_up_ast(struct gdlm_lock *lp) |
27 | { | 48 | { |
28 | queue_complete(astarg); | 49 | clear_bit(LFL_AST_WAIT, &lp->flags); |
50 | smp_mb__after_clear_bit(); | ||
51 | wake_up_bit(&lp->flags, LFL_AST_WAIT); | ||
29 | } | 52 | } |
30 | 53 | ||
31 | static inline void gdlm_bast(void *astarg, int mode) | 54 | static void gdlm_delete_lp(struct gdlm_lock *lp) |
32 | { | 55 | { |
33 | struct gdlm_lock *lp = astarg; | ||
34 | struct gdlm_ls *ls = lp->ls; | 56 | struct gdlm_ls *ls = lp->ls; |
35 | 57 | ||
36 | if (!mode) { | ||
37 | printk(KERN_INFO "lock_dlm: bast mode zero %x,%llx\n", | ||
38 | lp->lockname.ln_type, | ||
39 | (unsigned long long)lp->lockname.ln_number); | ||
40 | return; | ||
41 | } | ||
42 | |||
43 | spin_lock(&ls->async_lock); | 58 | spin_lock(&ls->async_lock); |
44 | if (!lp->bast_mode) { | 59 | if (!list_empty(&lp->delay_list)) |
45 | list_add_tail(&lp->blist, &ls->blocking); | 60 | list_del_init(&lp->delay_list); |
46 | lp->bast_mode = mode; | 61 | gdlm_assert(!list_empty(&lp->all_list), "%x,%llx", lp->lockname.ln_type, |
47 | } else if (lp->bast_mode < mode) | 62 | (unsigned long long)lp->lockname.ln_number); |
48 | lp->bast_mode = mode; | 63 | list_del_init(&lp->all_list); |
64 | ls->all_locks_count--; | ||
49 | spin_unlock(&ls->async_lock); | 65 | spin_unlock(&ls->async_lock); |
50 | wake_up(&ls->thread_wait); | 66 | |
67 | kfree(lp); | ||
51 | } | 68 | } |
52 | 69 | ||
53 | void gdlm_queue_delayed(struct gdlm_lock *lp) | 70 | static void gdlm_queue_delayed(struct gdlm_lock *lp) |
54 | { | 71 | { |
55 | struct gdlm_ls *ls = lp->ls; | 72 | struct gdlm_ls *ls = lp->ls; |
56 | 73 | ||
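For context on the handshake above: wake_up_ast() is the completion half of a bit-wait on LFL_AST_WAIT. It clears the bit, forces the memory barrier, then wakes anyone sleeping on that bit. A minimal sketch of what the waiting half could look like, assuming the wait_on_bit()/wake_up_bit() API of this kernel generation; the helper names below are hypothetical, and the initiator is assumed to have set LFL_AST_WAIT before submitting its request:

	static int ast_wait(void *word)
	{
		/* invoked while LFL_AST_WAIT is still set; sleep and let
		   wait_on_bit() re-test the bit after we are woken */
		schedule();
		return 0;
	}

	static void wait_for_ast(struct gdlm_lock *lp)
	{
		/* sleeps until wake_up_ast() clears LFL_AST_WAIT and calls
		   wake_up_bit() on the same word/bit pair */
		wait_on_bit(&lp->flags, LFL_AST_WAIT, ast_wait,
			    TASK_UNINTERRUPTIBLE);
	}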
@@ -59,6 +76,249 @@ void gdlm_queue_delayed(struct gdlm_lock *lp) | |||
59 | spin_unlock(&ls->async_lock); | 76 | spin_unlock(&ls->async_lock); |
60 | } | 77 | } |
61 | 78 | ||
79 | static void process_complete(struct gdlm_lock *lp) | ||
80 | { | ||
81 | struct gdlm_ls *ls = lp->ls; | ||
82 | struct lm_async_cb acb; | ||
83 | s16 prev_mode = lp->cur; | ||
84 | |||
85 | memset(&acb, 0, sizeof(acb)); | ||
86 | |||
87 | if (lp->lksb.sb_status == -DLM_ECANCEL) { | ||
88 | log_info("complete dlm cancel %x,%llx flags %lx", | ||
89 | lp->lockname.ln_type, | ||
90 | (unsigned long long)lp->lockname.ln_number, | ||
91 | lp->flags); | ||
92 | |||
93 | lp->req = lp->cur; | ||
94 | acb.lc_ret |= LM_OUT_CANCELED; | ||
95 | if (lp->cur == DLM_LOCK_IV) | ||
96 | lp->lksb.sb_lkid = 0; | ||
97 | goto out; | ||
98 | } | ||
99 | |||
100 | if (test_and_clear_bit(LFL_DLM_UNLOCK, &lp->flags)) { | ||
101 | if (lp->lksb.sb_status != -DLM_EUNLOCK) { | ||
102 | log_info("unlock sb_status %d %x,%llx flags %lx", | ||
103 | lp->lksb.sb_status, lp->lockname.ln_type, | ||
104 | (unsigned long long)lp->lockname.ln_number, | ||
105 | lp->flags); | ||
106 | return; | ||
107 | } | ||
108 | |||
109 | lp->cur = DLM_LOCK_IV; | ||
110 | lp->req = DLM_LOCK_IV; | ||
111 | lp->lksb.sb_lkid = 0; | ||
112 | |||
113 | if (test_and_clear_bit(LFL_UNLOCK_DELETE, &lp->flags)) { | ||
114 | gdlm_delete_lp(lp); | ||
115 | return; | ||
116 | } | ||
117 | goto out; | ||
118 | } | ||
119 | |||
120 | if (lp->lksb.sb_flags & DLM_SBF_VALNOTVALID) | ||
121 | memset(lp->lksb.sb_lvbptr, 0, GDLM_LVB_SIZE); | ||
122 | |||
123 | if (lp->lksb.sb_flags & DLM_SBF_ALTMODE) { | ||
124 | if (lp->req == DLM_LOCK_PR) | ||
125 | lp->req = DLM_LOCK_CW; | ||
126 | else if (lp->req == DLM_LOCK_CW) | ||
127 | lp->req = DLM_LOCK_PR; | ||
128 | } | ||
129 | |||
130 | /* | ||
131 | * A canceled lock request. The lock was just taken off the delayed | ||
132 | * list and was never even submitted to dlm. | ||
133 | */ | ||
134 | |||
135 | if (test_and_clear_bit(LFL_CANCEL, &lp->flags)) { | ||
136 | log_info("complete internal cancel %x,%llx", | ||
137 | lp->lockname.ln_type, | ||
138 | (unsigned long long)lp->lockname.ln_number); | ||
139 | lp->req = lp->cur; | ||
140 | acb.lc_ret |= LM_OUT_CANCELED; | ||
141 | goto out; | ||
142 | } | ||
143 | |||
144 | /* | ||
145 | * An error occurred. | ||
146 | */ | ||
147 | |||
148 | if (lp->lksb.sb_status) { | ||
149 | /* a "normal" error */ | ||
150 | if ((lp->lksb.sb_status == -EAGAIN) && | ||
151 | (lp->lkf & DLM_LKF_NOQUEUE)) { | ||
152 | lp->req = lp->cur; | ||
153 | if (lp->cur == DLM_LOCK_IV) | ||
154 | lp->lksb.sb_lkid = 0; | ||
155 | goto out; | ||
156 | } | ||
157 | |||
158 | /* this could only happen with cancels I think */ | ||
159 | log_info("ast sb_status %d %x,%llx flags %lx", | ||
160 | lp->lksb.sb_status, lp->lockname.ln_type, | ||
161 | (unsigned long long)lp->lockname.ln_number, | ||
162 | lp->flags); | ||
163 | if (lp->lksb.sb_status == -EDEADLOCK && | ||
164 | lp->ls->fsflags & LM_MFLAG_CONV_NODROP) { | ||
165 | lp->req = lp->cur; | ||
166 | acb.lc_ret |= LM_OUT_CONV_DEADLK; | ||
167 | if (lp->cur == DLM_LOCK_IV) | ||
168 | lp->lksb.sb_lkid = 0; | ||
169 | goto out; | ||
170 | } else | ||
171 | return; | ||
172 | } | ||
173 | |||
174 | /* | ||
175 | * This is an AST for an EX->EX conversion for sync_lvb from GFS. | ||
176 | */ | ||
177 | |||
178 | if (test_and_clear_bit(LFL_SYNC_LVB, &lp->flags)) { | ||
179 | wake_up_ast(lp); | ||
180 | return; | ||
181 | } | ||
182 | |||
183 | /* | ||
184 | * A lock has been demoted to NL because it initially completed during | ||
185 | * BLOCK_LOCKS. Now it must be requested in the originally requested | ||
186 | * mode. | ||
187 | */ | ||
188 | |||
189 | if (test_and_clear_bit(LFL_REREQUEST, &lp->flags)) { | ||
190 | gdlm_assert(lp->req == DLM_LOCK_NL, "%x,%llx", | ||
191 | lp->lockname.ln_type, | ||
192 | (unsigned long long)lp->lockname.ln_number); | ||
193 | gdlm_assert(lp->prev_req > DLM_LOCK_NL, "%x,%llx", | ||
194 | lp->lockname.ln_type, | ||
195 | (unsigned long long)lp->lockname.ln_number); | ||
196 | |||
197 | lp->cur = DLM_LOCK_NL; | ||
198 | lp->req = lp->prev_req; | ||
199 | lp->prev_req = DLM_LOCK_IV; | ||
200 | lp->lkf &= ~DLM_LKF_CONVDEADLK; | ||
201 | |||
202 | set_bit(LFL_NOCACHE, &lp->flags); | ||
203 | |||
204 | if (test_bit(DFL_BLOCK_LOCKS, &ls->flags) && | ||
205 | !test_bit(LFL_NOBLOCK, &lp->flags)) | ||
206 | gdlm_queue_delayed(lp); | ||
207 | else | ||
208 | queue_submit(lp); | ||
209 | return; | ||
210 | } | ||
211 | |||
212 | /* | ||
213 | * A request is granted during dlm recovery. It may be granted | ||
214 | * because the locks of a failed node were cleared. In that case, | ||
215 | * there may be inconsistent data beneath this lock and we must wait | ||
216 | * for recovery to complete to use it. When gfs recovery is done this | ||
217 | * granted lock will be converted to NL and then reacquired in this | ||
218 | * granted state. | ||
219 | */ | ||
220 | |||
221 | if (test_bit(DFL_BLOCK_LOCKS, &ls->flags) && | ||
222 | !test_bit(LFL_NOBLOCK, &lp->flags) && | ||
223 | lp->req != DLM_LOCK_NL) { | ||
224 | |||
225 | lp->cur = lp->req; | ||
226 | lp->prev_req = lp->req; | ||
227 | lp->req = DLM_LOCK_NL; | ||
228 | lp->lkf |= DLM_LKF_CONVERT; | ||
229 | lp->lkf &= ~DLM_LKF_CONVDEADLK; | ||
230 | |||
231 | log_debug("rereq %x,%llx id %x %d,%d", | ||
232 | lp->lockname.ln_type, | ||
233 | (unsigned long long)lp->lockname.ln_number, | ||
234 | lp->lksb.sb_lkid, lp->cur, lp->req); | ||
235 | |||
236 | set_bit(LFL_REREQUEST, &lp->flags); | ||
237 | queue_submit(lp); | ||
238 | return; | ||
239 | } | ||
240 | |||
241 | /* | ||
242 | * DLM demoted the lock to NL before it was granted so GFS must be | ||
243 | * told it cannot cache data for this lock. | ||
244 | */ | ||
245 | |||
246 | if (lp->lksb.sb_flags & DLM_SBF_DEMOTED) | ||
247 | set_bit(LFL_NOCACHE, &lp->flags); | ||
248 | |||
249 | out: | ||
250 | /* | ||
251 | * This is an internal lock_dlm lock | ||
252 | */ | ||
253 | |||
254 | if (test_bit(LFL_INLOCK, &lp->flags)) { | ||
255 | clear_bit(LFL_NOBLOCK, &lp->flags); | ||
256 | lp->cur = lp->req; | ||
257 | wake_up_ast(lp); | ||
258 | return; | ||
259 | } | ||
260 | |||
261 | /* | ||
262 | * Normal completion of a lock request. Tell GFS it now has the lock. | ||
263 | */ | ||
264 | |||
265 | clear_bit(LFL_NOBLOCK, &lp->flags); | ||
266 | lp->cur = lp->req; | ||
267 | |||
268 | acb.lc_name = lp->lockname; | ||
269 | acb.lc_ret |= gdlm_make_lmstate(lp->cur); | ||
270 | |||
271 | if (!test_and_clear_bit(LFL_NOCACHE, &lp->flags) && | ||
272 | (lp->cur > DLM_LOCK_NL) && (prev_mode > DLM_LOCK_NL)) | ||
273 | acb.lc_ret |= LM_OUT_CACHEABLE; | ||
274 | |||
275 | ls->fscb(ls->sdp, LM_CB_ASYNC, &acb); | ||
276 | } | ||
277 | |||
278 | static void gdlm_ast(void *astarg) | ||
279 | { | ||
280 | struct gdlm_lock *lp = astarg; | ||
281 | clear_bit(LFL_ACTIVE, &lp->flags); | ||
282 | process_complete(lp); | ||
283 | } | ||
284 | |||
285 | static void process_blocking(struct gdlm_lock *lp, int bast_mode) | ||
286 | { | ||
287 | struct gdlm_ls *ls = lp->ls; | ||
288 | unsigned int cb = 0; | ||
289 | |||
290 | switch (gdlm_make_lmstate(bast_mode)) { | ||
291 | case LM_ST_EXCLUSIVE: | ||
292 | cb = LM_CB_NEED_E; | ||
293 | break; | ||
294 | case LM_ST_DEFERRED: | ||
295 | cb = LM_CB_NEED_D; | ||
296 | break; | ||
297 | case LM_ST_SHARED: | ||
298 | cb = LM_CB_NEED_S; | ||
299 | break; | ||
300 | default: | ||
301 | gdlm_assert(0, "unknown bast mode %u", bast_mode); | ||
302 | } | ||
303 | |||
304 | ls->fscb(ls->sdp, cb, &lp->lockname); | ||
305 | } | ||
306 | |||
307 | |||
308 | static void gdlm_bast(void *astarg, int mode) | ||
309 | { | ||
310 | struct gdlm_lock *lp = astarg; | ||
311 | |||
312 | if (!mode) { | ||
313 | printk(KERN_INFO "lock_dlm: bast mode zero %x,%llx\n", | ||
314 | lp->lockname.ln_type, | ||
315 | (unsigned long long)lp->lockname.ln_number); | ||
316 | return; | ||
317 | } | ||
318 | |||
319 | process_blocking(lp, mode); | ||
320 | } | ||
321 | |||
62 | /* convert gfs lock-state to dlm lock-mode */ | 322 | /* convert gfs lock-state to dlm lock-mode */ |
63 | 323 | ||
64 | static s16 make_mode(s16 lmstate) | 324 | static s16 make_mode(s16 lmstate) |
@@ -77,24 +337,6 @@ static s16 make_mode(s16 lmstate) | |||
77 | return -1; | 337 | return -1; |
78 | } | 338 | } |
79 | 339 | ||
80 | /* convert dlm lock-mode to gfs lock-state */ | ||
81 | |||
82 | s16 gdlm_make_lmstate(s16 dlmmode) | ||
83 | { | ||
84 | switch (dlmmode) { | ||
85 | case DLM_LOCK_IV: | ||
86 | case DLM_LOCK_NL: | ||
87 | return LM_ST_UNLOCKED; | ||
88 | case DLM_LOCK_EX: | ||
89 | return LM_ST_EXCLUSIVE; | ||
90 | case DLM_LOCK_CW: | ||
91 | return LM_ST_DEFERRED; | ||
92 | case DLM_LOCK_PR: | ||
93 | return LM_ST_SHARED; | ||
94 | } | ||
95 | gdlm_assert(0, "unknown DLM mode %d", dlmmode); | ||
96 | return -1; | ||
97 | } | ||
98 | 340 | ||
99 | /* verify agreement with GFS on the current lock state, NB: DLM_LOCK_NL and | 341 | /* verify agreement with GFS on the current lock state, NB: DLM_LOCK_NL and |
100 | DLM_LOCK_IV are both considered LM_ST_UNLOCKED by GFS. */ | 342 | DLM_LOCK_IV are both considered LM_ST_UNLOCKED by GFS. */ |
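The NB above is the invariant the two translation helpers depend on: gdlm_make_lmstate(), moved to the top of the file by this patch, and make_mode(), whose body the diff elides, are meant to be inverses on the four GFS states, with DLM_LOCK_IV and DLM_LOCK_NL both folding to LM_ST_UNLOCKED. Below is a stand-alone userspace sketch of that round trip that a reviewer could compile and run; every constant is a local stand-in rather than the real <linux/dlm.h> or lm_interface.h value, and make_mode() is reconstructed from context, so treat it as illustrative only:

	#include <assert.h>

	/* stand-in values, chosen only so this sketch compiles */
	enum { DLM_LOCK_IV = -1, DLM_LOCK_NL = 0, DLM_LOCK_CW = 2,
	       DLM_LOCK_PR = 3, DLM_LOCK_EX = 5 };
	enum { LM_ST_UNLOCKED, LM_ST_EXCLUSIVE, LM_ST_DEFERRED, LM_ST_SHARED };

	static int make_lmstate(int dlmmode)	/* mirrors gdlm_make_lmstate() */
	{
		switch (dlmmode) {
		case DLM_LOCK_IV:
		case DLM_LOCK_NL: return LM_ST_UNLOCKED;
		case DLM_LOCK_EX: return LM_ST_EXCLUSIVE;
		case DLM_LOCK_CW: return LM_ST_DEFERRED;
		case DLM_LOCK_PR: return LM_ST_SHARED;
		}
		return -1;
	}

	static int make_mode(int lmstate)	/* assumed inverse mapping */
	{
		switch (lmstate) {
		case LM_ST_UNLOCKED:  return DLM_LOCK_NL;
		case LM_ST_EXCLUSIVE: return DLM_LOCK_EX;
		case LM_ST_DEFERRED:  return DLM_LOCK_CW;
		case LM_ST_SHARED:    return DLM_LOCK_PR;
		}
		return -1;
	}

	int main(void)
	{
		int s;

		for (s = LM_ST_UNLOCKED; s <= LM_ST_SHARED; s++)
			assert(make_lmstate(make_mode(s)) == s);
		assert(make_lmstate(DLM_LOCK_IV) == LM_ST_UNLOCKED);
		return 0;
	}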
@@ -173,10 +415,6 @@ static int gdlm_create_lp(struct gdlm_ls *ls, struct lm_lockname *name, | |||
173 | make_strname(name, &lp->strname); | 415 | make_strname(name, &lp->strname); |
174 | lp->ls = ls; | 416 | lp->ls = ls; |
175 | lp->cur = DLM_LOCK_IV; | 417 | lp->cur = DLM_LOCK_IV; |
176 | lp->lvb = NULL; | ||
177 | lp->hold_null = NULL; | ||
178 | INIT_LIST_HEAD(&lp->clist); | ||
179 | INIT_LIST_HEAD(&lp->blist); | ||
180 | INIT_LIST_HEAD(&lp->delay_list); | 418 | INIT_LIST_HEAD(&lp->delay_list); |
181 | 419 | ||
182 | spin_lock(&ls->async_lock); | 420 | spin_lock(&ls->async_lock); |
@@ -188,26 +426,6 @@ static int gdlm_create_lp(struct gdlm_ls *ls, struct lm_lockname *name, | |||
188 | return 0; | 426 | return 0; |
189 | } | 427 | } |
190 | 428 | ||
191 | void gdlm_delete_lp(struct gdlm_lock *lp) | ||
192 | { | ||
193 | struct gdlm_ls *ls = lp->ls; | ||
194 | |||
195 | spin_lock(&ls->async_lock); | ||
196 | if (!list_empty(&lp->clist)) | ||
197 | list_del_init(&lp->clist); | ||
198 | if (!list_empty(&lp->blist)) | ||
199 | list_del_init(&lp->blist); | ||
200 | if (!list_empty(&lp->delay_list)) | ||
201 | list_del_init(&lp->delay_list); | ||
202 | gdlm_assert(!list_empty(&lp->all_list), "%x,%llx", lp->lockname.ln_type, | ||
203 | (unsigned long long)lp->lockname.ln_number); | ||
204 | list_del_init(&lp->all_list); | ||
205 | ls->all_locks_count--; | ||
206 | spin_unlock(&ls->async_lock); | ||
207 | |||
208 | kfree(lp); | ||
209 | } | ||
210 | |||
211 | int gdlm_get_lock(void *lockspace, struct lm_lockname *name, | 429 | int gdlm_get_lock(void *lockspace, struct lm_lockname *name, |
212 | void **lockp) | 430 | void **lockp) |
213 | { | 431 | { |
@@ -261,7 +479,7 @@ unsigned int gdlm_do_lock(struct gdlm_lock *lp) | |||
261 | 479 | ||
262 | if ((error == -EAGAIN) && (lp->lkf & DLM_LKF_NOQUEUE)) { | 480 | if ((error == -EAGAIN) && (lp->lkf & DLM_LKF_NOQUEUE)) { |
263 | lp->lksb.sb_status = -EAGAIN; | 481 | lp->lksb.sb_status = -EAGAIN; |
264 | queue_complete(lp); | 482 | gdlm_ast(lp); |
265 | error = 0; | 483 | error = 0; |
266 | } | 484 | } |
267 | 485 | ||
@@ -311,6 +529,9 @@ unsigned int gdlm_lock(void *lock, unsigned int cur_state, | |||
311 | if (req_state == LM_ST_UNLOCKED) | 529 | if (req_state == LM_ST_UNLOCKED) |
312 | return gdlm_unlock(lock, cur_state); | 530 | return gdlm_unlock(lock, cur_state); |
313 | 531 | ||
532 | if (req_state == LM_ST_UNLOCKED) | ||
533 | return gdlm_unlock(lock, cur_state); | ||
534 | |||
314 | clear_bit(LFL_DLM_CANCEL, &lp->flags); | 535 | clear_bit(LFL_DLM_CANCEL, &lp->flags); |
315 | if (flags & LM_FLAG_NOEXP) | 536 | if (flags & LM_FLAG_NOEXP) |
316 | set_bit(LFL_NOBLOCK, &lp->flags); | 537 | set_bit(LFL_NOBLOCK, &lp->flags); |
@@ -354,7 +575,7 @@ void gdlm_cancel(void *lock) | |||
354 | if (delay_list) { | 575 | if (delay_list) { |
355 | set_bit(LFL_CANCEL, &lp->flags); | 576 | set_bit(LFL_CANCEL, &lp->flags); |
356 | set_bit(LFL_ACTIVE, &lp->flags); | 577 | set_bit(LFL_ACTIVE, &lp->flags); |
357 | queue_complete(lp); | 578 | gdlm_ast(lp); |
358 | return; | 579 | return; |
359 | } | 580 | } |
360 | 581 | ||
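One subtlety of process_complete(), added in the second hunk, is worth spelling out: on a normal completion GFS is only told the result is cacheable when the lock did not pass through NL or IV on either side of the transition and nothing has set LFL_NOCACHE (this patch sets that bit for rerequested locks and for DLM_SBF_DEMOTED grants). A stand-alone restatement of that predicate, using plain ints and a hypothetical helper name since the kernel definitions are not reproduced here:

	#include <stdbool.h>

	/* DLM modes order numerically with IV below NL and NL below every
	   granted mode, so "> NL" excludes both NL and IV; "nocache" stands
	   for the LFL_NOCACHE bit tested with test_and_clear_bit() above. */
	static bool report_cacheable(bool nocache, int prev_mode, int new_mode,
				     int dlm_lock_nl)
	{
		return !nocache && prev_mode > dlm_lock_nl &&
		       new_mode > dlm_lock_nl;
	}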