author		David Teigland <teigland@redhat.com>	2006-01-16 11:52:38 -0500
committer	Steven Whitehouse <swhiteho@redhat.com>	2006-01-16 11:52:38 -0500
commit		29b7998d887529eca1ef43c8ca7b278448dc983c (patch)
tree		8968045a976eba9ce349b045c8db7a9ee82a6a2e /fs/gfs2/locking
parent		b3b94faa5fe5968827ba0640ee9fba4b3e7f736e (diff)
[GFS2] The lock modules for GFS2
This patch contains the pluggable locking modules for GFS2.

Signed-off-by: David Teigland <teigland@redhat.com>
Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
Diffstat (limited to 'fs/gfs2/locking')
-rw-r--r--	fs/gfs2/locking/dlm/Makefile			3
-rw-r--r--	fs/gfs2/locking/dlm/lock.c			537
-rw-r--r--	fs/gfs2/locking/dlm/main.c			58
-rw-r--r--	fs/gfs2/locking/dlm/mount.c			240
-rw-r--r--	fs/gfs2/locking/dlm/sysfs.c			315
-rw-r--r--	fs/gfs2/locking/dlm/thread.c			359
-rw-r--r--	fs/gfs2/locking/nolock/Makefile			3
-rw-r--r--	fs/gfs2/locking/nolock/lock_nolock.mod.c	44
-rw-r--r--	fs/gfs2/locking/nolock/main.c			357
9 files changed, 1916 insertions, 0 deletions
diff --git a/fs/gfs2/locking/dlm/Makefile b/fs/gfs2/locking/dlm/Makefile
new file mode 100644
index 000000000000..d3bca02f7b3e
--- /dev/null
+++ b/fs/gfs2/locking/dlm/Makefile
@@ -0,0 +1,3 @@
1obj-$(CONFIG_GFS2_FS) += lock_dlm.o
2lock_dlm-y := lock.o main.o mount.o sysfs.o thread.o
3
diff --git a/fs/gfs2/locking/dlm/lock.c b/fs/gfs2/locking/dlm/lock.c
new file mode 100644
index 000000000000..daf59d504e29
--- /dev/null
+++ b/fs/gfs2/locking/dlm/lock.c
@@ -0,0 +1,537 @@
1/******************************************************************************
2*******************************************************************************
3**
4** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
5** Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
6**
7** This copyrighted material is made available to anyone wishing to use,
8** modify, copy, or redistribute it subject to the terms and conditions
9** of the GNU General Public License v.2.
10**
11*******************************************************************************
12******************************************************************************/
13
14#include "lock_dlm.h"
15
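/* Scratch lock value block handed to the internal NL locks created by
   hold_null_lock() below; it lets those requests carry DLM_LKF_VALBLK
   without allocating a per-lock buffer, and its contents are never used. */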
16static char junk_lvb[GDLM_LVB_SIZE];
17
18static void queue_complete(struct gdlm_lock *lp)
19{
20 struct gdlm_ls *ls = lp->ls;
21
22 clear_bit(LFL_ACTIVE, &lp->flags);
23
24 spin_lock(&ls->async_lock);
25 list_add_tail(&lp->clist, &ls->complete);
26 spin_unlock(&ls->async_lock);
27 wake_up(&ls->thread_wait);
28}
29
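/* Completion (ast) and blocking (bast) callbacks from the dlm.  They only
   queue the lock on the lockspace's complete/blocking list and wake the
   lock_dlm threads (thread.c), which do the real processing. */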
30static inline void gdlm_ast(void *astarg)
31{
32 queue_complete((struct gdlm_lock *) astarg);
33}
34
35static inline void gdlm_bast(void *astarg, int mode)
36{
37 struct gdlm_lock *lp = astarg;
38 struct gdlm_ls *ls = lp->ls;
39
40 if (!mode) {
41 printk("lock_dlm: bast mode zero %x,%"PRIx64"\n",
42 lp->lockname.ln_type, lp->lockname.ln_number);
43 return;
44 }
45
46 spin_lock(&ls->async_lock);
47 if (!lp->bast_mode) {
48 list_add_tail(&lp->blist, &ls->blocking);
49 lp->bast_mode = mode;
50 } else if (lp->bast_mode < mode)
51 lp->bast_mode = mode;
52 spin_unlock(&ls->async_lock);
53 wake_up(&ls->thread_wait);
54}
55
56void gdlm_queue_delayed(struct gdlm_lock *lp)
57{
58 struct gdlm_ls *ls = lp->ls;
59
60 spin_lock(&ls->async_lock);
61 list_add_tail(&lp->delay_list, &ls->delayed);
62 spin_unlock(&ls->async_lock);
63}
64
65/* convert gfs lock-state to dlm lock-mode */
66
67static int16_t make_mode(int16_t lmstate)
68{
69 switch (lmstate) {
70 case LM_ST_UNLOCKED:
71 return DLM_LOCK_NL;
72 case LM_ST_EXCLUSIVE:
73 return DLM_LOCK_EX;
74 case LM_ST_DEFERRED:
75 return DLM_LOCK_CW;
76 case LM_ST_SHARED:
77 return DLM_LOCK_PR;
78 default:
79 GDLM_ASSERT(0, printk("unknown LM state %d\n", lmstate););
80 }
81}
82
83/* convert dlm lock-mode to gfs lock-state */
84
85int16_t gdlm_make_lmstate(int16_t dlmmode)
86{
87 switch (dlmmode) {
88 case DLM_LOCK_IV:
89 case DLM_LOCK_NL:
90 return LM_ST_UNLOCKED;
91 case DLM_LOCK_EX:
92 return LM_ST_EXCLUSIVE;
93 case DLM_LOCK_CW:
94 return LM_ST_DEFERRED;
95 case DLM_LOCK_PR:
96 return LM_ST_SHARED;
97 default:
98 GDLM_ASSERT(0, printk("unknown DLM mode %d\n", dlmmode););
99 }
100}
101
102/* verify agreement with GFS on the current lock state, NB: DLM_LOCK_NL and
103 DLM_LOCK_IV are both considered LM_ST_UNLOCKED by GFS. */
104
105static void check_cur_state(struct gdlm_lock *lp, unsigned int cur_state)
106{
107 int16_t cur = make_mode(cur_state);
108 if (lp->cur != DLM_LOCK_IV)
109 GDLM_ASSERT(lp->cur == cur, printk("%d, %d\n", lp->cur, cur););
110}
111
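/* translate GFS lock flags and the cur -> req transition into dlm_lock() flags */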
112static inline unsigned int make_flags(struct gdlm_lock *lp,
113 unsigned int gfs_flags,
114 int16_t cur, int16_t req)
115{
116 unsigned int lkf = 0;
117
118 if (gfs_flags & LM_FLAG_TRY)
119 lkf |= DLM_LKF_NOQUEUE;
120
121 if (gfs_flags & LM_FLAG_TRY_1CB) {
122 lkf |= DLM_LKF_NOQUEUE;
123 lkf |= DLM_LKF_NOQUEUEBAST;
124 }
125
126 if (gfs_flags & LM_FLAG_PRIORITY) {
127 lkf |= DLM_LKF_NOORDER;
128 lkf |= DLM_LKF_HEADQUE;
129 }
130
131 if (gfs_flags & LM_FLAG_ANY) {
132 if (req == DLM_LOCK_PR)
133 lkf |= DLM_LKF_ALTCW;
134 else if (req == DLM_LOCK_CW)
135 lkf |= DLM_LKF_ALTPR;
136 }
137
138 if (lp->lksb.sb_lkid != 0) {
139 lkf |= DLM_LKF_CONVERT;
140
141 /* Conversion deadlock avoidance by DLM */
142
143 if (!test_bit(LFL_FORCE_PROMOTE, &lp->flags) &&
144 !(lkf & DLM_LKF_NOQUEUE) &&
145 cur > DLM_LOCK_NL && req > DLM_LOCK_NL && cur != req)
146 lkf |= DLM_LKF_CONVDEADLK;
147 }
148
149 if (lp->lvb)
150 lkf |= DLM_LKF_VALBLK;
151
152 return lkf;
153}
154
155/* make_strname - convert GFS lock numbers to a string */
156
157static inline void make_strname(struct lm_lockname *lockname,
158 struct gdlm_strname *str)
159{
160 sprintf(str->name, "%8x%16"PRIx64, lockname->ln_type,
161 lockname->ln_number);
162 str->namelen = GDLM_STRNAME_BYTES;
163}
164
165int gdlm_create_lp(struct gdlm_ls *ls, struct lm_lockname *name,
166 struct gdlm_lock **lpp)
167{
168 struct gdlm_lock *lp;
169
170 lp = kmalloc(sizeof(struct gdlm_lock), GFP_KERNEL);
171 if (!lp)
172 return -ENOMEM;
173
174 memset(lp, 0, sizeof(struct gdlm_lock));
175 lp->lockname = *name;
176 lp->ls = ls;
177 lp->cur = DLM_LOCK_IV;
178 lp->lvb = NULL;
179 lp->hold_null = NULL;
180 init_completion(&lp->ast_wait);
181 INIT_LIST_HEAD(&lp->clist);
182 INIT_LIST_HEAD(&lp->blist);
183 INIT_LIST_HEAD(&lp->delay_list);
184
185 spin_lock(&ls->async_lock);
186 list_add(&lp->all_list, &ls->all_locks);
187 ls->all_locks_count++;
188 spin_unlock(&ls->async_lock);
189
190 *lpp = lp;
191 return 0;
192}
193
194void gdlm_delete_lp(struct gdlm_lock *lp)
195{
196 struct gdlm_ls *ls = lp->ls;
197
198 spin_lock(&ls->async_lock);
199 if (!list_empty(&lp->clist))
200 list_del_init(&lp->clist);
201 if (!list_empty(&lp->blist))
202 list_del_init(&lp->blist);
203 if (!list_empty(&lp->delay_list))
204 list_del_init(&lp->delay_list);
205 GDLM_ASSERT(!list_empty(&lp->all_list),);
206 list_del_init(&lp->all_list);
207 ls->all_locks_count--;
208 spin_unlock(&ls->async_lock);
209
210 kfree(lp);
211}
212
213int gdlm_get_lock(lm_lockspace_t *lockspace, struct lm_lockname *name,
214 lm_lock_t **lockp)
215{
216 struct gdlm_lock *lp;
217 int error;
218
219 error = gdlm_create_lp((struct gdlm_ls *) lockspace, name, &lp);
220
221 *lockp = (lm_lock_t *) lp;
222 return error;
223}
224
225void gdlm_put_lock(lm_lock_t *lock)
226{
227 gdlm_delete_lp((struct gdlm_lock *) lock);
228}
229
230void gdlm_do_lock(struct gdlm_lock *lp, struct dlm_range *range)
231{
232 struct gdlm_ls *ls = lp->ls;
233 struct gdlm_strname str;
234 int error, bast = 1;
235
236 /*
237 * When recovery is in progress, delay lock requests so they can be
238 * submitted once recovery is done.  Requests for recovery (NOEXP)
239 * and unlocks can pass.
240 */
241
242 if (test_bit(DFL_BLOCK_LOCKS, &ls->flags) &&
243 !test_bit(LFL_NOBLOCK, &lp->flags) && lp->req != DLM_LOCK_NL) {
244 gdlm_queue_delayed(lp);
245 return;
246 }
247
248 /*
249 * Submit the actual lock request.
250 */
251
252 if (test_bit(LFL_NOBAST, &lp->flags))
253 bast = 0;
254
255 make_strname(&lp->lockname, &str);
256
257 set_bit(LFL_ACTIVE, &lp->flags);
258
259 log_debug("lk %x,%"PRIx64" id %x %d,%d %x", lp->lockname.ln_type,
260 lp->lockname.ln_number, lp->lksb.sb_lkid,
261 lp->cur, lp->req, lp->lkf);
262
263 error = dlm_lock(ls->dlm_lockspace, lp->req, &lp->lksb, lp->lkf,
264 str.name, str.namelen, 0, gdlm_ast, (void *) lp,
265 bast ? gdlm_bast : NULL, range);
266
267 if ((error == -EAGAIN) && (lp->lkf & DLM_LKF_NOQUEUE)) {
268 lp->lksb.sb_status = -EAGAIN;
269 queue_complete(lp);
270 error = 0;
271 }
272
273 GDLM_ASSERT(!error,
274 printk("%s: num=%x,%"PRIx64" err=%d cur=%d req=%d lkf=%x\n",
275 ls->fsname, lp->lockname.ln_type,
276 lp->lockname.ln_number, error, lp->cur, lp->req,
277 lp->lkf););
278}
279
280void gdlm_do_unlock(struct gdlm_lock *lp)
281{
282 unsigned int lkf = 0;
283 int error;
284
285 set_bit(LFL_DLM_UNLOCK, &lp->flags);
286 set_bit(LFL_ACTIVE, &lp->flags);
287
288 if (lp->lvb)
289 lkf = DLM_LKF_VALBLK;
290
291 log_debug("un %x,%"PRIx64" %x %d %x", lp->lockname.ln_type,
292 lp->lockname.ln_number, lp->lksb.sb_lkid, lp->cur, lkf);
293
294 error = dlm_unlock(lp->ls->dlm_lockspace, lp->lksb.sb_lkid, lkf,
295 NULL, lp);
296
297 GDLM_ASSERT(!error,
298 printk("%s: error=%d num=%x,%"PRIx64" lkf=%x flags=%lx\n",
299 lp->ls->fsname, error, lp->lockname.ln_type,
300 lp->lockname.ln_number, lkf, lp->flags););
301}
302
303unsigned int gdlm_lock(lm_lock_t *lock, unsigned int cur_state,
304 unsigned int req_state, unsigned int flags)
305{
306 struct gdlm_lock *lp = (struct gdlm_lock *) lock;
307
308 clear_bit(LFL_DLM_CANCEL, &lp->flags);
309 if (flags & LM_FLAG_NOEXP)
310 set_bit(LFL_NOBLOCK, &lp->flags);
311
312 check_cur_state(lp, cur_state);
313 lp->req = make_mode(req_state);
314 lp->lkf = make_flags(lp, flags, lp->cur, lp->req);
315
316 gdlm_do_lock(lp, NULL);
317 return LM_OUT_ASYNC;
318}
319
320unsigned int gdlm_unlock(lm_lock_t *lock, unsigned int cur_state)
321{
322 struct gdlm_lock *lp = (struct gdlm_lock *) lock;
323
324 clear_bit(LFL_DLM_CANCEL, &lp->flags);
325 if (lp->cur == DLM_LOCK_IV)
326 return 0;
327 gdlm_do_unlock(lp);
328 return LM_OUT_ASYNC;
329}
330
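/* Cancel a pending request.  Three cases: the request is still sitting on
   the delayed list (complete it here with LM_OUT_CANCELED), it is not
   active or is already being unlocked (nothing to do), or it is queued or
   converting inside the dlm (ask the dlm to cancel it). */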
331void gdlm_cancel(lm_lock_t *lock)
332{
333 struct gdlm_lock *lp = (struct gdlm_lock *) lock;
334 struct gdlm_ls *ls = lp->ls;
335 int error, delay_list = 0;
336
337 if (test_bit(LFL_DLM_CANCEL, &lp->flags))
338 return;
339
340 log_all("gdlm_cancel %x,%"PRIx64" flags %lx",
341 lp->lockname.ln_type, lp->lockname.ln_number, lp->flags);
342
343 spin_lock(&ls->async_lock);
344 if (!list_empty(&lp->delay_list)) {
345 list_del_init(&lp->delay_list);
346 delay_list = 1;
347 }
348 spin_unlock(&ls->async_lock);
349
350 if (delay_list) {
351 set_bit(LFL_CANCEL, &lp->flags);
352 set_bit(LFL_ACTIVE, &lp->flags);
353 queue_complete(lp);
354 return;
355 }
356
357 if (!test_bit(LFL_ACTIVE, &lp->flags) ||
358 test_bit(LFL_DLM_UNLOCK, &lp->flags)) {
359 log_all("gdlm_cancel skip %x,%"PRIx64" flags %lx",
360 lp->lockname.ln_type, lp->lockname.ln_number,
361 lp->flags);
362 return;
363 }
364
365 /* the lock is blocked in the dlm */
366
367 set_bit(LFL_DLM_CANCEL, &lp->flags);
368 set_bit(LFL_ACTIVE, &lp->flags);
369
370 error = dlm_unlock(ls->dlm_lockspace, lp->lksb.sb_lkid, DLM_LKF_CANCEL,
371 NULL, lp);
372
373 log_all("gdlm_cancel rv %d %x,%"PRIx64" flags %lx", error,
374 lp->lockname.ln_type, lp->lockname.ln_number, lp->flags);
375
376 if (error == -EBUSY)
377 clear_bit(LFL_DLM_CANCEL, &lp->flags);
378}
379
380int gdlm_add_lvb(struct gdlm_lock *lp)
381{
382 char *lvb;
383
384 lvb = kmalloc(GDLM_LVB_SIZE, GFP_KERNEL);
385 if (!lvb)
386 return -ENOMEM;
387
388 memset(lvb, 0, GDLM_LVB_SIZE);
389
390 lp->lksb.sb_lvbptr = lvb;
391 lp->lvb = lvb;
392 return 0;
393}
394
395void gdlm_del_lvb(struct gdlm_lock *lp)
396{
397 kfree(lp->lvb);
398 lp->lvb = NULL;
399 lp->lksb.sb_lvbptr = NULL;
400}
401
402/* This can do a synchronous dlm request (requiring a lock_dlm thread to get
403 the completion) because gfs won't call hold_lvb() during a callback (from
404 the context of a lock_dlm thread). */
405
406static int hold_null_lock(struct gdlm_lock *lp)
407{
408 struct gdlm_lock *lpn = NULL;
409 int error;
410
411 if (lp->hold_null) {
412 printk("lock_dlm: lvb already held\n");
413 return 0;
414 }
415
416 error = gdlm_create_lp(lp->ls, &lp->lockname, &lpn);
417 if (error)
418 goto out;
419
420 lpn->lksb.sb_lvbptr = junk_lvb;
421 lpn->lvb = junk_lvb;
422
423 lpn->req = DLM_LOCK_NL;
424 lpn->lkf = DLM_LKF_VALBLK | DLM_LKF_EXPEDITE;
425 set_bit(LFL_NOBAST, &lpn->flags);
426 set_bit(LFL_INLOCK, &lpn->flags);
427
428 init_completion(&lpn->ast_wait);
429 gdlm_do_lock(lpn, NULL);
430 wait_for_completion(&lpn->ast_wait);
431 error = lpn->lksb.sb_status;
432 if (error) {
433 printk("lock_dlm: hold_null_lock dlm error %d\n", error);
434 gdlm_delete_lp(lpn);
435 lpn = NULL;
436 }
437 out:
438 lp->hold_null = lpn;
439 return error;
440}
441
442/* This cannot do a synchronous dlm request (requiring a lock_dlm thread to get
443 the completion) because gfs may call unhold_lvb() during a callback (from
444 the context of a lock_dlm thread) which could cause a deadlock since the
445 other lock_dlm thread could be engaged in recovery. */
446
447static void unhold_null_lock(struct gdlm_lock *lp)
448{
449 struct gdlm_lock *lpn = lp->hold_null;
450
451 GDLM_ASSERT(lpn,);
452 lpn->lksb.sb_lvbptr = NULL;
453 lpn->lvb = NULL;
454 set_bit(LFL_UNLOCK_DELETE, &lpn->flags);
455 gdlm_do_unlock(lpn);
456 lp->hold_null = NULL;
457}
458
459/* Acquire a NL lock because gfs requires the value block to remain
460 intact on the resource while the lvb is "held" even if it's holding no locks
461 on the resource. */
462
463int gdlm_hold_lvb(lm_lock_t *lock, char **lvbp)
464{
465 struct gdlm_lock *lp = (struct gdlm_lock *) lock;
466 int error;
467
468 error = gdlm_add_lvb(lp);
469 if (error)
470 return error;
471
472 *lvbp = lp->lvb;
473
474 error = hold_null_lock(lp);
475 if (error)
476 gdlm_del_lvb(lp);
477
478 return error;
479}
480
481void gdlm_unhold_lvb(lm_lock_t *lock, char *lvb)
482{
483 struct gdlm_lock *lp = (struct gdlm_lock *) lock;
484
485 unhold_null_lock(lp);
486 gdlm_del_lvb(lp);
487}
488
489void gdlm_sync_lvb(lm_lock_t *lock, char *lvb)
490{
491 struct gdlm_lock *lp = (struct gdlm_lock *) lock;
492
493 if (lp->cur != DLM_LOCK_EX)
494 return;
495
496 init_completion(&lp->ast_wait);
497 set_bit(LFL_SYNC_LVB, &lp->flags);
498
499 lp->req = DLM_LOCK_EX;
500 lp->lkf = make_flags(lp, 0, lp->cur, lp->req);
501
502 gdlm_do_lock(lp, NULL);
503 wait_for_completion(&lp->ast_wait);
504}
505
506void gdlm_submit_delayed(struct gdlm_ls *ls)
507{
508 struct gdlm_lock *lp, *safe;
509
510 spin_lock(&ls->async_lock);
511 list_for_each_entry_safe(lp, safe, &ls->delayed, delay_list) {
512 list_del_init(&lp->delay_list);
513 list_add_tail(&lp->delay_list, &ls->submit);
514 }
515 spin_unlock(&ls->async_lock);
516 wake_up(&ls->thread_wait);
517}
518
519int gdlm_release_all_locks(struct gdlm_ls *ls)
520{
521 struct gdlm_lock *lp, *safe;
522 int count = 0;
523
524 spin_lock(&ls->async_lock);
525 list_for_each_entry_safe(lp, safe, &ls->all_locks, all_list) {
526 list_del_init(&lp->all_list);
527
528 if (lp->lvb && lp->lvb != junk_lvb)
529 kfree(lp->lvb);
530 kfree(lp);
531 count++;
532 }
533 spin_unlock(&ls->async_lock);
534
535 return count;
536}
537
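The functions above are only reached through the lm_lockops table registered in
mount.c below.  A rough sketch of how a filesystem client might drive this
asynchronous interface follows; the helper names (example_cb, example_take_lock)
are invented for illustration, the callback prototype is inferred from how
ls->fscb is invoked in thread.c, and only the lm_* entry points and LM_*
constants that appear in this patch are assumed.

#include "../../lm_interface.h"	/* same header the nolock module uses */

static void example_cb(lm_fsdata_t *fsdata, unsigned int type, void *data)
{
	struct lm_async_cb *acb;

	if (type != LM_CB_ASYNC)
		return;			/* LM_CB_NEED_*, LM_CB_DROPLOCKS, ... */

	acb = data;
	/* acb->lc_name names the lock; acb->lc_ret carries the granted
	   LM_ST_* state plus the LM_OUT_CANCELED / LM_OUT_CACHEABLE bits */
}

static int example_take_lock(struct lm_lockstruct *ls, struct lm_lockname *name)
{
	lm_lock_t *lock;
	unsigned int ret;
	int error;

	error = ls->ls_ops->lm_get_lock(ls->ls_lockspace, name, &lock);
	if (error)
		return error;

	ret = ls->ls_ops->lm_lock(lock, LM_ST_UNLOCKED, LM_ST_SHARED, 0);
	if (ret != LM_OUT_ASYNC)
		return -EIO;		/* gdlm_lock always returns LM_OUT_ASYNC */

	/* the grant is delivered later via example_cb(..., LM_CB_ASYNC, ...) */
	return 0;
}

In practice example_cb would be the lm_callback_t passed in through lm_mount();
gdlm_mount() stores it as ls->fscb, and thread.c invokes it for completions,
blocking callbacks, drop-locks requests and recovery notifications.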
diff --git a/fs/gfs2/locking/dlm/main.c b/fs/gfs2/locking/dlm/main.c
new file mode 100644
index 000000000000..3ced92ef1b19
--- /dev/null
+++ b/fs/gfs2/locking/dlm/main.c
@@ -0,0 +1,58 @@
1/******************************************************************************
2*******************************************************************************
3**
4** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
5** Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
6**
7** This copyrighted material is made available to anyone wishing to use,
8** modify, copy, or redistribute it subject to the terms and conditions
9** of the GNU General Public License v.2.
10**
11*******************************************************************************
12******************************************************************************/
13
14#include <linux/init.h>
15
16#include "lock_dlm.h"
17
18extern int gdlm_drop_count;
19extern int gdlm_drop_period;
20
21extern struct lm_lockops gdlm_ops;
22
23int __init init_lock_dlm(void)
24{
25 int error;
26
27 error = lm_register_proto(&gdlm_ops);
28 if (error) {
29 printk("lock_dlm: can't register protocol: %d\n", error);
30 return error;
31 }
32
33 error = gdlm_sysfs_init();
34 if (error) {
35 lm_unregister_proto(&gdlm_ops);
36 return error;
37 }
38
39 gdlm_drop_count = GDLM_DROP_COUNT;
40 gdlm_drop_period = GDLM_DROP_PERIOD;
41
42 printk("Lock_DLM (built %s %s) installed\n", __DATE__, __TIME__);
43 return 0;
44}
45
46void __exit exit_lock_dlm(void)
47{
48 lm_unregister_proto(&gdlm_ops);
49 gdlm_sysfs_exit();
50}
51
52module_init(init_lock_dlm);
53module_exit(exit_lock_dlm);
54
55MODULE_DESCRIPTION("GFS DLM Locking Module");
56MODULE_AUTHOR("Red Hat, Inc.");
57MODULE_LICENSE("GPL");
58
diff --git a/fs/gfs2/locking/dlm/mount.c b/fs/gfs2/locking/dlm/mount.c
new file mode 100644
index 000000000000..92b1789deb89
--- /dev/null
+++ b/fs/gfs2/locking/dlm/mount.c
@@ -0,0 +1,240 @@
1/******************************************************************************
2*******************************************************************************
3**
4** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
5** Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
6**
7** This copyrighted material is made available to anyone wishing to use,
8** modify, copy, or redistribute it subject to the terms and conditions
9** of the GNU General Public License v.2.
10**
11*******************************************************************************
12******************************************************************************/
13
14#include "lock_dlm.h"
15
16int gdlm_drop_count;
17int gdlm_drop_period;
18struct lm_lockops gdlm_ops;
19
20
21static struct gdlm_ls *init_gdlm(lm_callback_t cb, lm_fsdata_t *fsdata,
22 int flags, char *table_name)
23{
24 struct gdlm_ls *ls;
25 char buf[256], *p;
26
27 ls = kmalloc(sizeof(struct gdlm_ls), GFP_KERNEL);
28 if (!ls)
29 return NULL;
30
31 memset(ls, 0, sizeof(struct gdlm_ls));
32
33 ls->drop_locks_count = gdlm_drop_count;
34 ls->drop_locks_period = gdlm_drop_period;
35
36 ls->fscb = cb;
37 ls->fsdata = fsdata;
38 ls->fsflags = flags;
39
40 spin_lock_init(&ls->async_lock);
41
42 INIT_LIST_HEAD(&ls->complete);
43 INIT_LIST_HEAD(&ls->blocking);
44 INIT_LIST_HEAD(&ls->delayed);
45 INIT_LIST_HEAD(&ls->submit);
46 INIT_LIST_HEAD(&ls->all_locks);
47
48 init_waitqueue_head(&ls->thread_wait);
49 init_waitqueue_head(&ls->wait_control);
50 ls->thread1 = NULL;
51 ls->thread2 = NULL;
52 ls->drop_time = jiffies;
53 ls->jid = -1;
54
55 strncpy(buf, table_name, 256);
56 buf[255] = '\0';
57
58 p = strstr(buf, ":");
59 if (!p) {
60 printk("lock_dlm: invalid table_name \"%s\"\n", table_name);
61 kfree(ls);
62 return NULL;
63 }
64 *p = '\0';
65 p++;
66
67 strncpy(ls->clustername, buf, 128);
68 strncpy(ls->fsname, p, 128);
69
70 return ls;
71}
72
73static int gdlm_mount(char *table_name, char *host_data,
74 lm_callback_t cb, lm_fsdata_t *fsdata,
75 unsigned int min_lvb_size, int flags,
76 struct lm_lockstruct *lockstruct)
77{
78 struct gdlm_ls *ls;
79 int error = -ENOMEM;
80
81 if (min_lvb_size > GDLM_LVB_SIZE)
82 goto out;
83
84 ls = init_gdlm(cb, fsdata, flags, table_name);
85 if (!ls)
86 goto out;
87
88 error = gdlm_init_threads(ls);
89 if (error)
90 goto out_free;
91
92 error = dlm_new_lockspace(ls->fsname, strlen(ls->fsname),
93 &ls->dlm_lockspace, 0, GDLM_LVB_SIZE);
94 if (error) {
95 printk("lock_dlm: dlm_new_lockspace error %d\n", error);
96 goto out_thread;
97 }
98
99 error = gdlm_kobject_setup(ls);
100 if (error)
101 goto out_dlm;
102 kobject_uevent(&ls->kobj, KOBJ_MOUNT, NULL);
103
104 /* Now we depend on userspace to notice the new mount,
105 join the appropriate group, and do a write to our sysfs
106 "mounted" or "terminate" file. Before the start, userspace
107 must set "jid" and "first". */
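/* The sysfs files involved are created in sysfs.c, one directory per
   filesystem (named after ls->fsname) under the "lock_dlm" kset:
     jid      - journal id for this node, written before "mounted"
     first    - 1 if this node is the first mounter, else 0
     mounted  - 1 (join done), 0 (leave done) or -1 (terminate)
     block    - 1 to delay new lock requests, 0 to resume and resubmit them
     recover  - jid of a failed journal to recover
   See the gdlm_*_store()/gdlm_*_show() handlers in sysfs.c. */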
108
109 error = wait_event_interruptible(ls->wait_control,
110 test_bit(DFL_JOIN_DONE, &ls->flags));
111 if (error)
112 goto out_sysfs;
113
114 if (test_bit(DFL_TERMINATE, &ls->flags)) {
115 error = -ERESTARTSYS;
116 goto out_sysfs;
117 }
118
119 lockstruct->ls_jid = ls->jid;
120 lockstruct->ls_first = ls->first;
121 lockstruct->ls_lockspace = ls;
122 lockstruct->ls_ops = &gdlm_ops;
123 lockstruct->ls_flags = 0;
124 lockstruct->ls_lvb_size = GDLM_LVB_SIZE;
125 return 0;
126
127 out_sysfs:
128 gdlm_kobject_release(ls);
129 out_dlm:
130 dlm_release_lockspace(ls->dlm_lockspace, 2);
131 out_thread:
132 gdlm_release_threads(ls);
133 out_free:
134 kfree(ls);
135 out:
136 return error;
137}
138
139static void gdlm_unmount(lm_lockspace_t *lockspace)
140{
141 struct gdlm_ls *ls = (struct gdlm_ls *) lockspace;
142 int rv;
143
144 log_debug("unmount flags %lx", ls->flags);
145
146 if (test_bit(DFL_WITHDRAW, &ls->flags)) {
147 gdlm_kobject_release(ls);
148 goto out;
149 }
150
151 kobject_uevent(&ls->kobj, KOBJ_UMOUNT, NULL);
152
153 wait_event_interruptible(ls->wait_control,
154 test_bit(DFL_LEAVE_DONE, &ls->flags));
155
156 gdlm_kobject_release(ls);
157 dlm_release_lockspace(ls->dlm_lockspace, 2);
158 gdlm_release_threads(ls);
159 rv = gdlm_release_all_locks(ls);
160 if (rv)
161 log_all("lm_dlm_unmount: %d stray locks freed", rv);
162 out:
163 kfree(ls);
164}
165
166static void gdlm_recovery_done(lm_lockspace_t *lockspace, unsigned int jid,
167 unsigned int message)
168{
169 struct gdlm_ls *ls = (struct gdlm_ls *) lockspace;
170 ls->recover_done = jid;
171 kobject_uevent(&ls->kobj, KOBJ_CHANGE, NULL);
172}
173
174static void gdlm_others_may_mount(lm_lockspace_t *lockspace)
175{
176 struct gdlm_ls *ls = (struct gdlm_ls *) lockspace;
177 ls->first_done = 1;
178 kobject_uevent(&ls->kobj, KOBJ_CHANGE, NULL);
179}
180
181static void gdlm_withdraw(lm_lockspace_t *lockspace)
182{
183 struct gdlm_ls *ls = (struct gdlm_ls *) lockspace;
184
185 /* userspace suspends locking on all other members */
186
187 kobject_uevent(&ls->kobj, KOBJ_OFFLINE, NULL);
188
189 wait_event_interruptible(ls->wait_control,
190 test_bit(DFL_WITHDRAW, &ls->flags));
191
192 dlm_release_lockspace(ls->dlm_lockspace, 2);
193 gdlm_release_threads(ls);
194 gdlm_release_all_locks(ls);
195
196 kobject_uevent(&ls->kobj, KOBJ_UMOUNT, NULL);
197
198 /* userspace leaves the mount group, we don't need to wait for
199 that to complete */
200}
201
202int gdlm_plock_get(lm_lockspace_t *lockspace, struct lm_lockname *name,
203 struct file *file, struct file_lock *fl)
204{
205 return -ENOSYS;
206}
207
208int gdlm_punlock(lm_lockspace_t *lockspace, struct lm_lockname *name,
209 struct file *file, struct file_lock *fl)
210{
211 return -ENOSYS;
212}
213
214int gdlm_plock(lm_lockspace_t *lockspace, struct lm_lockname *name,
215 struct file *file, int cmd, struct file_lock *fl)
216{
217 return -ENOSYS;
218}
219
220struct lm_lockops gdlm_ops = {
221 .lm_proto_name = "lock_dlm",
222 .lm_mount = gdlm_mount,
223 .lm_others_may_mount = gdlm_others_may_mount,
224 .lm_unmount = gdlm_unmount,
225 .lm_withdraw = gdlm_withdraw,
226 .lm_get_lock = gdlm_get_lock,
227 .lm_put_lock = gdlm_put_lock,
228 .lm_lock = gdlm_lock,
229 .lm_unlock = gdlm_unlock,
230 .lm_plock = gdlm_plock,
231 .lm_punlock = gdlm_punlock,
232 .lm_plock_get = gdlm_plock_get,
233 .lm_cancel = gdlm_cancel,
234 .lm_hold_lvb = gdlm_hold_lvb,
235 .lm_unhold_lvb = gdlm_unhold_lvb,
236 .lm_sync_lvb = gdlm_sync_lvb,
237 .lm_recovery_done = gdlm_recovery_done,
238 .lm_owner = THIS_MODULE,
239};
240
diff --git a/fs/gfs2/locking/dlm/sysfs.c b/fs/gfs2/locking/dlm/sysfs.c
new file mode 100644
index 000000000000..8964733f55e4
--- /dev/null
+++ b/fs/gfs2/locking/dlm/sysfs.c
@@ -0,0 +1,315 @@
1/******************************************************************************
2*******************************************************************************
3**
4** Copyright (C) 2005 Red Hat, Inc. All rights reserved.
5**
6** This copyrighted material is made available to anyone wishing to use,
7** modify, copy, or redistribute it subject to the terms and conditions
8** of the GNU General Public License v.2.
9**
10*******************************************************************************
11******************************************************************************/
12
13#include <linux/ctype.h>
14#include <linux/stat.h>
15
16#include "lock_dlm.h"
17
18static ssize_t gdlm_block_show(struct gdlm_ls *ls, char *buf)
19{
20 ssize_t ret;
21 int val = 0;
22
23 if (test_bit(DFL_BLOCK_LOCKS, &ls->flags))
24 val = 1;
25 ret = sprintf(buf, "%d\n", val);
26 return ret;
27}
28
29static ssize_t gdlm_block_store(struct gdlm_ls *ls, const char *buf, size_t len)
30{
31 ssize_t ret = len;
32 int val;
33
34 val = simple_strtol(buf, NULL, 0);
35
36 if (val == 1)
37 set_bit(DFL_BLOCK_LOCKS, &ls->flags);
38 else if (val == 0) {
39 clear_bit(DFL_BLOCK_LOCKS, &ls->flags);
40 gdlm_submit_delayed(ls);
41 } else
42 ret = -EINVAL;
43 return ret;
44}
45
46static ssize_t gdlm_mounted_show(struct gdlm_ls *ls, char *buf)
47{
48 ssize_t ret;
49 int val = -2;
50
51 if (test_bit(DFL_TERMINATE, &ls->flags))
52 val = -1;
53 else if (test_bit(DFL_LEAVE_DONE, &ls->flags))
54 val = 0;
55 else if (test_bit(DFL_JOIN_DONE, &ls->flags))
56 val = 1;
57 ret = sprintf(buf, "%d\n", val);
58 return ret;
59}
60
61static ssize_t gdlm_mounted_store(struct gdlm_ls *ls, const char *buf, size_t len)
62{
63 ssize_t ret = len;
64 int val;
65
66 val = simple_strtol(buf, NULL, 0);
67
68 if (val == 1)
69 set_bit(DFL_JOIN_DONE, &ls->flags);
70 else if (val == 0)
71 set_bit(DFL_LEAVE_DONE, &ls->flags);
72 else if (val == -1) {
73 set_bit(DFL_TERMINATE, &ls->flags);
74 set_bit(DFL_JOIN_DONE, &ls->flags);
75 set_bit(DFL_LEAVE_DONE, &ls->flags);
76 } else
77 ret = -EINVAL;
78 wake_up(&ls->wait_control);
79 return ret;
80}
81
82static ssize_t gdlm_withdraw_show(struct gdlm_ls *ls, char *buf)
83{
84 ssize_t ret;
85 int val = 0;
86
87 if (test_bit(DFL_WITHDRAW, &ls->flags))
88 val = 1;
89 ret = sprintf(buf, "%d\n", val);
90 return ret;
91}
92
93static ssize_t gdlm_withdraw_store(struct gdlm_ls *ls, const char *buf, size_t len)
94{
95 ssize_t ret = len;
96 int val;
97
98 val = simple_strtol(buf, NULL, 0);
99
100 if (val == 1)
101 set_bit(DFL_WITHDRAW, &ls->flags);
102 else
103 ret = -EINVAL;
104 wake_up(&ls->wait_control);
105 return ret;
106}
107
108static ssize_t gdlm_jid_show(struct gdlm_ls *ls, char *buf)
109{
110 return sprintf(buf, "%u\n", ls->jid);
111}
112
113static ssize_t gdlm_jid_store(struct gdlm_ls *ls, const char *buf, size_t len)
114{
115 ls->jid = simple_strtol(buf, NULL, 0);
116 return len;
117}
118
119static ssize_t gdlm_first_show(struct gdlm_ls *ls, char *buf)
120{
121 return sprintf(buf, "%u\n", ls->first);
122}
123
124static ssize_t gdlm_first_store(struct gdlm_ls *ls, const char *buf, size_t len)
125{
126 ls->first = simple_strtol(buf, NULL, 0);
127 return len;
128}
129
130static ssize_t gdlm_first_done_show(struct gdlm_ls *ls, char *buf)
131{
132 return sprintf(buf, "%d\n", ls->first_done);
133}
134
135static ssize_t gdlm_recover_show(struct gdlm_ls *ls, char *buf)
136{
137 return sprintf(buf, "%u\n", ls->recover_jid);
138}
139
140static ssize_t gdlm_recover_store(struct gdlm_ls *ls, const char *buf, size_t len)
141{
142 ls->recover_jid = simple_strtol(buf, NULL, 0);
143 ls->fscb(ls->fsdata, LM_CB_NEED_RECOVERY, &ls->recover_jid);
144 return len;
145}
146
147static ssize_t gdlm_recover_done_show(struct gdlm_ls *ls, char *buf)
148{
149 ssize_t ret;
150 ret = sprintf(buf, "%d\n", ls->recover_done);
151 return ret;
152}
153
154static ssize_t gdlm_cluster_show(struct gdlm_ls *ls, char *buf)
155{
156 ssize_t ret;
157 ret = sprintf(buf, "%s\n", ls->clustername);
158 return ret;
159}
160
161static ssize_t gdlm_options_show(struct gdlm_ls *ls, char *buf)
162{
163 ssize_t ret = 0;
164
165 if (ls->fsflags & LM_MFLAG_SPECTATOR)
166 ret += sprintf(buf, "spectator ");
167
168 return ret;
169}
170
171struct gdlm_attr {
172 struct attribute attr;
173 ssize_t (*show)(struct gdlm_ls *, char *);
174 ssize_t (*store)(struct gdlm_ls *, const char *, size_t);
175};
176
177static struct gdlm_attr gdlm_attr_block = {
178 .attr = {.name = "block", .mode = S_IRUGO | S_IWUSR},
179 .show = gdlm_block_show,
180 .store = gdlm_block_store
181};
182
183static struct gdlm_attr gdlm_attr_mounted = {
184 .attr = {.name = "mounted", .mode = S_IRUGO | S_IWUSR},
185 .show = gdlm_mounted_show,
186 .store = gdlm_mounted_store
187};
188
189static struct gdlm_attr gdlm_attr_withdraw = {
190 .attr = {.name = "withdraw", .mode = S_IRUGO | S_IWUSR},
191 .show = gdlm_withdraw_show,
192 .store = gdlm_withdraw_store
193};
194
195static struct gdlm_attr gdlm_attr_jid = {
196 .attr = {.name = "jid", .mode = S_IRUGO | S_IWUSR},
197 .show = gdlm_jid_show,
198 .store = gdlm_jid_store
199};
200
201static struct gdlm_attr gdlm_attr_first = {
202 .attr = {.name = "first", .mode = S_IRUGO | S_IWUSR},
203 .show = gdlm_first_show,
204 .store = gdlm_first_store
205};
206
207static struct gdlm_attr gdlm_attr_first_done = {
208 .attr = {.name = "first_done", .mode = S_IRUGO},
209 .show = gdlm_first_done_show,
210};
211
212static struct gdlm_attr gdlm_attr_recover = {
213 .attr = {.name = "recover", .mode = S_IRUGO | S_IWUSR},
214 .show = gdlm_recover_show,
215 .store = gdlm_recover_store
216};
217
218static struct gdlm_attr gdlm_attr_recover_done = {
219 .attr = {.name = "recover_done", .mode = S_IRUGO | S_IWUSR},
220 .show = gdlm_recover_done_show,
221};
222
223static struct gdlm_attr gdlm_attr_cluster = {
224 .attr = {.name = "cluster", .mode = S_IRUGO | S_IWUSR},
225 .show = gdlm_cluster_show,
226};
227
228static struct gdlm_attr gdlm_attr_options = {
229 .attr = {.name = "options", .mode = S_IRUGO | S_IWUSR},
230 .show = gdlm_options_show,
231};
232
233static struct attribute *gdlm_attrs[] = {
234 &gdlm_attr_block.attr,
235 &gdlm_attr_mounted.attr,
236 &gdlm_attr_withdraw.attr,
237 &gdlm_attr_jid.attr,
238 &gdlm_attr_first.attr,
239 &gdlm_attr_first_done.attr,
240 &gdlm_attr_recover.attr,
241 &gdlm_attr_recover_done.attr,
242 &gdlm_attr_cluster.attr,
243 &gdlm_attr_options.attr,
244 NULL,
245};
246
247static ssize_t gdlm_attr_show(struct kobject *kobj, struct attribute *attr,
248 char *buf)
249{
250 struct gdlm_ls *ls = container_of(kobj, struct gdlm_ls, kobj);
251 struct gdlm_attr *a = container_of(attr, struct gdlm_attr, attr);
252 return a->show ? a->show(ls, buf) : 0;
253}
254
255static ssize_t gdlm_attr_store(struct kobject *kobj, struct attribute *attr,
256 const char *buf, size_t len)
257{
258 struct gdlm_ls *ls = container_of(kobj, struct gdlm_ls, kobj);
259 struct gdlm_attr *a = container_of(attr, struct gdlm_attr, attr);
260 return a->store ? a->store(ls, buf, len) : len;
261}
262
263static struct sysfs_ops gdlm_attr_ops = {
264 .show = gdlm_attr_show,
265 .store = gdlm_attr_store,
266};
267
268static struct kobj_type gdlm_ktype = {
269 .default_attrs = gdlm_attrs,
270 .sysfs_ops = &gdlm_attr_ops,
271};
272
273static struct kset gdlm_kset = {
274 .subsys = &kernel_subsys,
275 .kobj = {.name = "lock_dlm",},
276 .ktype = &gdlm_ktype,
277};
278
279int gdlm_kobject_setup(struct gdlm_ls *ls)
280{
281 int error;
282
283 error = kobject_set_name(&ls->kobj, "%s", ls->fsname);
284 if (error)
285 return error;
286
287 ls->kobj.kset = &gdlm_kset;
288 ls->kobj.ktype = &gdlm_ktype;
289
290 error = kobject_register(&ls->kobj);
291
292 return error;
293}
294
295void gdlm_kobject_release(struct gdlm_ls *ls)
296{
297 kobject_unregister(&ls->kobj);
298}
299
300int gdlm_sysfs_init(void)
301{
302 int error;
303
304 error = kset_register(&gdlm_kset);
305 if (error)
306 printk("lock_dlm: cannot register kset %d\n", error);
307
308 return error;
309}
310
311void gdlm_sysfs_exit(void)
312{
313 kset_unregister(&gdlm_kset);
314}
315
diff --git a/fs/gfs2/locking/dlm/thread.c b/fs/gfs2/locking/dlm/thread.c
new file mode 100644
index 000000000000..22bbe6d3a5ae
--- /dev/null
+++ b/fs/gfs2/locking/dlm/thread.c
@@ -0,0 +1,359 @@
1/******************************************************************************
2*******************************************************************************
3**
4** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
5** Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
6**
7** This copyrighted material is made available to anyone wishing to use,
8** modify, copy, or redistribute it subject to the terms and conditions
9** of the GNU General Public License v.2.
10**
11*******************************************************************************
12******************************************************************************/
13
14#include "lock_dlm.h"
15
16/* A lock placed on this queue is re-submitted to DLM as soon as the lock_dlm
17 thread gets to it. */
18
19static void queue_submit(struct gdlm_lock *lp)
20{
21 struct gdlm_ls *ls = lp->ls;
22
23 spin_lock(&ls->async_lock);
24 list_add_tail(&lp->delay_list, &ls->submit);
25 spin_unlock(&ls->async_lock);
26 wake_up(&ls->thread_wait);
27}
28
29static void process_submit(struct gdlm_lock *lp)
30{
31 gdlm_do_lock(lp, NULL);
32}
33
34static void process_blocking(struct gdlm_lock *lp, int bast_mode)
35{
36 struct gdlm_ls *ls = lp->ls;
37 unsigned int cb;
38
39 switch (gdlm_make_lmstate(bast_mode)) {
40 case LM_ST_EXCLUSIVE:
41 cb = LM_CB_NEED_E;
42 break;
43 case LM_ST_DEFERRED:
44 cb = LM_CB_NEED_D;
45 break;
46 case LM_ST_SHARED:
47 cb = LM_CB_NEED_S;
48 break;
49 default:
50 GDLM_ASSERT(0, printk("unknown bast mode %u\n", bast_mode););
51 }
52
53 ls->fscb(ls->fsdata, cb, &lp->lockname);
54}
55
56static void process_complete(struct gdlm_lock *lp)
57{
58 struct gdlm_ls *ls = lp->ls;
59 struct lm_async_cb acb;
60 int16_t prev_mode = lp->cur;
61
62 memset(&acb, 0, sizeof(acb));
63
64 if (lp->lksb.sb_status == -DLM_ECANCEL) {
65 log_all("complete dlm cancel %x,%"PRIx64" flags %lx",
66 lp->lockname.ln_type, lp->lockname.ln_number,
67 lp->flags);
68
69 lp->req = lp->cur;
70 acb.lc_ret |= LM_OUT_CANCELED;
71 if (lp->cur == DLM_LOCK_IV)
72 lp->lksb.sb_lkid = 0;
73 goto out;
74 }
75
76 if (test_and_clear_bit(LFL_DLM_UNLOCK, &lp->flags)) {
77 if (lp->lksb.sb_status != -DLM_EUNLOCK) {
78 log_all("unlock sb_status %d %x,%"PRIx64" flags %lx",
79 lp->lksb.sb_status, lp->lockname.ln_type,
80 lp->lockname.ln_number, lp->flags);
81 return;
82 }
83
84 lp->cur = DLM_LOCK_IV;
85 lp->req = DLM_LOCK_IV;
86 lp->lksb.sb_lkid = 0;
87
88 if (test_and_clear_bit(LFL_UNLOCK_DELETE, &lp->flags)) {
89 gdlm_delete_lp(lp);
90 return;
91 }
92 goto out;
93 }
94
95 if (lp->lksb.sb_flags & DLM_SBF_VALNOTVALID)
96 memset(lp->lksb.sb_lvbptr, 0, GDLM_LVB_SIZE);
97
98 if (lp->lksb.sb_flags & DLM_SBF_ALTMODE) {
99 if (lp->req == DLM_LOCK_PR)
100 lp->req = DLM_LOCK_CW;
101 else if (lp->req == DLM_LOCK_CW)
102 lp->req = DLM_LOCK_PR;
103 }
104
105 /*
106 * A canceled lock request. The lock was just taken off the delayed
107 * list and was never even submitted to dlm.
108 */
109
110 if (test_and_clear_bit(LFL_CANCEL, &lp->flags)) {
111 log_all("complete internal cancel %x,%"PRIx64"",
112 lp->lockname.ln_type, lp->lockname.ln_number);
113 lp->req = lp->cur;
114 acb.lc_ret |= LM_OUT_CANCELED;
115 goto out;
116 }
117
118 /*
119 * An error occurred.
120 */
121
122 if (lp->lksb.sb_status) {
123 /* a "normal" error */
124 if ((lp->lksb.sb_status == -EAGAIN) &&
125 (lp->lkf & DLM_LKF_NOQUEUE)) {
126 lp->req = lp->cur;
127 if (lp->cur == DLM_LOCK_IV)
128 lp->lksb.sb_lkid = 0;
129 goto out;
130 }
131
132 /* this could only happen with cancels I think */
133 log_all("ast sb_status %d %x,%"PRIx64" flags %lx",
134 lp->lksb.sb_status, lp->lockname.ln_type,
135 lp->lockname.ln_number, lp->flags);
136 return;
137 }
138
139 /*
140 * This is an AST for an EX->EX conversion for sync_lvb from GFS.
141 */
142
143 if (test_and_clear_bit(LFL_SYNC_LVB, &lp->flags)) {
144 complete(&lp->ast_wait);
145 return;
146 }
147
148 /*
149 * A lock has been demoted to NL because it initially completed during
150 * BLOCK_LOCKS. Now it must be requested in the originally requested
151 * mode.
152 */
153
154 if (test_and_clear_bit(LFL_REREQUEST, &lp->flags)) {
155 GDLM_ASSERT(lp->req == DLM_LOCK_NL,);
156 GDLM_ASSERT(lp->prev_req > DLM_LOCK_NL,);
157
158 lp->cur = DLM_LOCK_NL;
159 lp->req = lp->prev_req;
160 lp->prev_req = DLM_LOCK_IV;
161 lp->lkf &= ~DLM_LKF_CONVDEADLK;
162
163 set_bit(LFL_NOCACHE, &lp->flags);
164
165 if (test_bit(DFL_BLOCK_LOCKS, &ls->flags) &&
166 !test_bit(LFL_NOBLOCK, &lp->flags))
167 gdlm_queue_delayed(lp);
168 else
169 queue_submit(lp);
170 return;
171 }
172
173 /*
174 * A request is granted during dlm recovery. It may be granted
175 * because the locks of a failed node were cleared. In that case,
176 * there may be inconsistent data beneath this lock and we must wait
177 * for recovery to complete to use it. When gfs recovery is done this
178 * granted lock will be converted to NL and then reacquired in this
179 * granted state.
180 */
181
182 if (test_bit(DFL_BLOCK_LOCKS, &ls->flags) &&
183 !test_bit(LFL_NOBLOCK, &lp->flags) &&
184 lp->req != DLM_LOCK_NL) {
185
186 lp->cur = lp->req;
187 lp->prev_req = lp->req;
188 lp->req = DLM_LOCK_NL;
189 lp->lkf |= DLM_LKF_CONVERT;
190 lp->lkf &= ~DLM_LKF_CONVDEADLK;
191
192 log_debug("rereq %x,%"PRIx64" id %x %d,%d",
193 lp->lockname.ln_type, lp->lockname.ln_number,
194 lp->lksb.sb_lkid, lp->cur, lp->req);
195
196 set_bit(LFL_REREQUEST, &lp->flags);
197 queue_submit(lp);
198 return;
199 }
200
201 /*
202 * DLM demoted the lock to NL before it was granted so GFS must be
203 * told it cannot cache data for this lock.
204 */
205
206 if (lp->lksb.sb_flags & DLM_SBF_DEMOTED)
207 set_bit(LFL_NOCACHE, &lp->flags);
208
209 out:
210 /*
211 * This is an internal lock_dlm lock
212 */
213
214 if (test_bit(LFL_INLOCK, &lp->flags)) {
215 clear_bit(LFL_NOBLOCK, &lp->flags);
216 lp->cur = lp->req;
217 complete(&lp->ast_wait);
218 return;
219 }
220
221 /*
222 * Normal completion of a lock request. Tell GFS it now has the lock.
223 */
224
225 clear_bit(LFL_NOBLOCK, &lp->flags);
226 lp->cur = lp->req;
227
228 acb.lc_name = lp->lockname;
229 acb.lc_ret |= gdlm_make_lmstate(lp->cur);
230
231 if (!test_and_clear_bit(LFL_NOCACHE, &lp->flags) &&
232 (lp->cur > DLM_LOCK_NL) && (prev_mode > DLM_LOCK_NL))
233 acb.lc_ret |= LM_OUT_CACHEABLE;
234
235 ls->fscb(ls->fsdata, LM_CB_ASYNC, &acb);
236}
237
238static inline int no_work(struct gdlm_ls *ls, int blocking)
239{
240 int ret;
241
242 spin_lock(&ls->async_lock);
243 ret = list_empty(&ls->complete) && list_empty(&ls->submit);
244 if (ret && blocking)
245 ret = list_empty(&ls->blocking);
246 spin_unlock(&ls->async_lock);
247
248 return ret;
249}
250
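/* Rate-limited "drop locks" nudge: at most once per drop_locks_period
   seconds, and only if the lockspace currently holds at least
   drop_locks_count locks, gdlm_thread() below sends gfs an
   LM_CB_DROPLOCKS callback so it can trim its cache of unused locks. */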
251static inline int check_drop(struct gdlm_ls *ls)
252{
253 if (!ls->drop_locks_count)
254 return 0;
255
256 if (time_after(jiffies, ls->drop_time + ls->drop_locks_period * HZ)) {
257 ls->drop_time = jiffies;
258 if (ls->all_locks_count >= ls->drop_locks_count)
259 return 1;
260 }
261 return 0;
262}
263
264static int gdlm_thread(void *data)
265{
266 struct gdlm_ls *ls = (struct gdlm_ls *) data;
267 struct gdlm_lock *lp = NULL;
268 int blist = 0;
269 uint8_t complete, blocking, submit, drop;
270 DECLARE_WAITQUEUE(wait, current);
271
272 /* Only thread1 is allowed to do blocking callbacks since gfs
273 may wait for a completion callback within a blocking cb. */
274
275 if (current == ls->thread1)
276 blist = 1;
277
278 while (!kthread_should_stop()) {
279 set_current_state(TASK_INTERRUPTIBLE);
280 add_wait_queue(&ls->thread_wait, &wait);
281 if (no_work(ls, blist))
282 schedule();
283 remove_wait_queue(&ls->thread_wait, &wait);
284 set_current_state(TASK_RUNNING);
285
286 complete = blocking = submit = drop = 0;
287
288 spin_lock(&ls->async_lock);
289
290 if (blist && !list_empty(&ls->blocking)) {
291 lp = list_entry(ls->blocking.next, struct gdlm_lock,
292 blist);
293 list_del_init(&lp->blist);
294 blocking = lp->bast_mode;
295 lp->bast_mode = 0;
296 } else if (!list_empty(&ls->complete)) {
297 lp = list_entry(ls->complete.next, struct gdlm_lock,
298 clist);
299 list_del_init(&lp->clist);
300 complete = 1;
301 } else if (!list_empty(&ls->submit)) {
302 lp = list_entry(ls->submit.next, struct gdlm_lock,
303 delay_list);
304 list_del_init(&lp->delay_list);
305 submit = 1;
306 }
307
308 drop = check_drop(ls);
309 spin_unlock(&ls->async_lock);
310
311 if (complete)
312 process_complete(lp);
313
314 else if (blocking)
315 process_blocking(lp, blocking);
316
317 else if (submit)
318 process_submit(lp);
319
320 if (drop)
321 ls->fscb(ls->fsdata, LM_CB_DROPLOCKS, NULL);
322
323 schedule();
324 }
325
326 return 0;
327}
328
329int gdlm_init_threads(struct gdlm_ls *ls)
330{
331 struct task_struct *p;
332 int error;
333
334 p = kthread_run(gdlm_thread, ls, "lock_dlm1");
335 error = IS_ERR(p);
336 if (error) {
337 log_all("can't start lock_dlm1 thread %d", error);
338 return error;
339 }
340 ls->thread1 = p;
341
342 p = kthread_run(gdlm_thread, ls, "lock_dlm2");
343 error = IS_ERR(p);
344 if (error) {
345 log_all("can't start lock_dlm2 thread %d", error);
346 kthread_stop(ls->thread1);
347 return error;
348 }
349 ls->thread2 = p;
350
351 return 0;
352}
353
354void gdlm_release_threads(struct gdlm_ls *ls)
355{
356 kthread_stop(ls->thread1);
357 kthread_stop(ls->thread2);
358}
359
diff --git a/fs/gfs2/locking/nolock/Makefile b/fs/gfs2/locking/nolock/Makefile
new file mode 100644
index 000000000000..cdadf956c831
--- /dev/null
+++ b/fs/gfs2/locking/nolock/Makefile
@@ -0,0 +1,3 @@
1obj-$(CONFIG_GFS2_FS) += lock_nolock.o
2lock_nolock-y := main.o
3
diff --git a/fs/gfs2/locking/nolock/lock_nolock.mod.c b/fs/gfs2/locking/nolock/lock_nolock.mod.c
new file mode 100644
index 000000000000..ae92522b2182
--- /dev/null
+++ b/fs/gfs2/locking/nolock/lock_nolock.mod.c
@@ -0,0 +1,44 @@
1#include <linux/module.h>
2#include <linux/vermagic.h>
3#include <linux/compiler.h>
4
5MODULE_INFO(vermagic, VERMAGIC_STRING);
6
7#undef unix
8struct module __this_module
9__attribute__((section(".gnu.linkonce.this_module"))) = {
10 .name = __stringify(KBUILD_MODNAME),
11 .init = init_module,
12#ifdef CONFIG_MODULE_UNLOAD
13 .exit = cleanup_module,
14#endif
15};
16
17static const struct modversion_info ____versions[]
18__attribute_used__
19__attribute__((section("__versions"))) = {
20 { 0x316962fc, "struct_module" },
21 { 0x5a34a45c, "__kmalloc" },
22 { 0x724beef2, "malloc_sizes" },
23 { 0x3fa03a97, "memset" },
24 { 0xc16fe12d, "__memcpy" },
25 { 0xdd132261, "printk" },
26 { 0x859204af, "sscanf" },
27 { 0x3656bf5a, "lock_kernel" },
28 { 0x1e6d26a8, "strstr" },
29 { 0x41ede9df, "lm_register_proto" },
30 { 0xb1f975aa, "unlock_kernel" },
31 { 0x87b0b01f, "posix_lock_file_wait" },
32 { 0x75f29cfd, "kmem_cache_alloc" },
33 { 0x69384280, "lm_unregister_proto" },
34 { 0x37a0cba, "kfree" },
35 { 0x5d16bfe6, "posix_test_lock" },
36};
37
38static const char __module_depends[]
39__attribute_used__
40__attribute__((section(".modinfo"))) =
41"depends=gfs2";
42
43
44MODULE_INFO(srcversion, "123E446F965A386A0C017C4");
diff --git a/fs/gfs2/locking/nolock/main.c b/fs/gfs2/locking/nolock/main.c
new file mode 100644
index 000000000000..d3919e471163
--- /dev/null
+++ b/fs/gfs2/locking/nolock/main.c
@@ -0,0 +1,357 @@
1/******************************************************************************
2*******************************************************************************
3**
4** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
5** Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
6**
7** This copyrighted material is made available to anyone wishing to use,
8** modify, copy, or redistribute it subject to the terms and conditions
9** of the GNU General Public License v.2.
10**
11*******************************************************************************
12******************************************************************************/
13
14#include <linux/module.h>
15#include <linux/slab.h>
17#include <linux/init.h>
18#include <linux/types.h>
19#include <linux/fs.h>
20#include <linux/smp_lock.h>
21
22#include "../../lm_interface.h"
23
24struct nolock_lockspace {
25 unsigned int nl_lvb_size;
26};
27
28struct lm_lockops nolock_ops;
29
30/**
31 * nolock_mount - mount a nolock lockspace
32 * @table_name: the name of the space to mount
33 * @host_data: host specific data
34 * @cb: the callback
35 * @fsdata:
36 * @min_lvb_size:
37 * @flags:
38 * @lockstruct: the structure of crap to fill in
39 *
40 * Returns: 0 on success, -EXXX on failure
41 */
42
43static int nolock_mount(char *table_name, char *host_data,
44 lm_callback_t cb, lm_fsdata_t *fsdata,
45 unsigned int min_lvb_size, int flags,
46 struct lm_lockstruct *lockstruct)
47{
48 char *c;
49 unsigned int jid;
50 struct nolock_lockspace *nl;
51
52 /* If there is a "jid=" in the hostdata, return that jid.
53 Otherwise, return zero. */
54
55 c = strstr(host_data, "jid=");
56 if (!c)
57 jid = 0;
58 else {
59 c += 4;
60 sscanf(c, "%u", &jid);
61 }
62
63 nl = kmalloc(sizeof(struct nolock_lockspace), GFP_KERNEL);
64 if (!nl)
65 return -ENOMEM;
66
67 memset(nl, 0, sizeof(struct nolock_lockspace));
68 nl->nl_lvb_size = min_lvb_size;
69
70 lockstruct->ls_jid = jid;
71 lockstruct->ls_first = 1;
72 lockstruct->ls_lvb_size = min_lvb_size;
73 lockstruct->ls_lockspace = (lm_lockspace_t *)nl;
74 lockstruct->ls_ops = &nolock_ops;
75 lockstruct->ls_flags = LM_LSFLAG_LOCAL;
76
77 return 0;
78}
79
80/**
81 * nolock_others_may_mount - allow other nodes to mount the filesystem
82 * @lockspace: the lockspace to unmount
83 *
84 */
85
86static void nolock_others_may_mount(lm_lockspace_t *lockspace)
87{
88}
89
90/**
91 * nolock_unmount - unmount a lock space
92 * @lockspace: the lockspace to unmount
93 *
94 */
95
96static void nolock_unmount(lm_lockspace_t *lockspace)
97{
98 struct nolock_lockspace *nl = (struct nolock_lockspace *)lockspace;
99 kfree(nl);
100}
101
102/**
103 * nolock_withdraw - withdraw from a lock space
104 * @lockspace: the lockspace
105 *
106 */
107
108static void nolock_withdraw(lm_lockspace_t *lockspace)
109{
110}
111
112/**
113 * nolock_get_lock - get a lm_lock_t given a description of the lock
114 * @lockspace: the lockspace the lock lives in
115 * @name: the name of the lock
116 * @lockp: return the lm_lock_t here
117 *
118 * Returns: 0 on success, -EXXX on failure
119 */
120
121static int nolock_get_lock(lm_lockspace_t *lockspace, struct lm_lockname *name,
122 lm_lock_t **lockp)
123{
124 *lockp = (lm_lock_t *)lockspace;
125 return 0;
126}
127
128/**
129 * nolock_put_lock - get rid of a lock structure
130 * @lock: the lock to throw away
131 *
132 */
133
134static void nolock_put_lock(lm_lock_t *lock)
135{
136}
137
138/**
139 * nolock_lock - acquire a lock
140 * @lock: the lock to manipulate
141 * @cur_state: the current state
142 * @req_state: the requested state
143 * @flags: modifier flags
144 *
145 * Returns: A bitmap of LM_OUT_*
146 */
147
148static unsigned int nolock_lock(lm_lock_t *lock, unsigned int cur_state,
149 unsigned int req_state, unsigned int flags)
150{
151 return req_state | LM_OUT_CACHEABLE;
152}
153
154/**
155 * nolock_unlock - unlock a lock
156 * @lock: the lock to manipulate
157 * @cur_state: the current state
158 *
159 * Returns: 0
160 */
161
162static unsigned int nolock_unlock(lm_lock_t *lock, unsigned int cur_state)
163{
164 return 0;
165}
166
167/**
168 * nolock_cancel - cancel a request on a lock
169 * @lock: the lock to cancel request for
170 *
171 */
172
173static void nolock_cancel(lm_lock_t *lock)
174{
175}
176
177/**
178 * nolock_hold_lvb - hold on to a lock value block
179 * @lock: the lock the LVB is associated with
180 * @lvbp: return the lm_lvb_t here
181 *
182 * Returns: 0 on success, -EXXX on failure
183 */
184
185static int nolock_hold_lvb(lm_lock_t *lock, char **lvbp)
186{
187 struct nolock_lockspace *nl = (struct nolock_lockspace *)lock;
188 int error = 0;
189
190 *lvbp = kmalloc(nl->nl_lvb_size, GFP_KERNEL);
191 if (*lvbp)
192 memset(*lvbp, 0, nl->nl_lvb_size);
193 else
194 error = -ENOMEM;
195
196 return error;
197}
198
199/**
200 * nolock_unhold_lvb - release an LVB
201 * @lock: the lock the LVB is associated with
202 * @lvb: the lock value block
203 *
204 */
205
206static void nolock_unhold_lvb(lm_lock_t *lock, char *lvb)
207{
208 kfree(lvb);
209}
210
211/**
212 * nolock_sync_lvb - sync out the value of a lvb
213 * @lock: the lock the LVB is associated with
214 * @lvb: the lock value block
215 *
216 */
217
218static void nolock_sync_lvb(lm_lock_t *lock, char *lvb)
219{
220}
221
222/**
223 * nolock_plock_get -
224 * @lockspace: the lockspace
225 * @name:
226 * @file:
227 * @fl:
228 *
229 * Returns: errno
230 */
231
232static int nolock_plock_get(lm_lockspace_t *lockspace, struct lm_lockname *name,
233 struct file *file, struct file_lock *fl)
234{
235 struct file_lock *tmp;
236
237 lock_kernel();
238 tmp = posix_test_lock(file, fl);
239 fl->fl_type = F_UNLCK;
240 if (tmp)
241 memcpy(fl, tmp, sizeof(struct file_lock));
242 unlock_kernel();
243
244 return 0;
245}
246
247/**
248 * nolock_plock -
249 * @lockspace: the lockspace
250 * @name:
251 * @file:
252 * @cmd:
253 * @fl:
254 *
255 * Returns: errno
256 */
257
258static int nolock_plock(lm_lockspace_t *lockspace, struct lm_lockname *name,
259 struct file *file, int cmd, struct file_lock *fl)
260{
261 int error;
262 lock_kernel();
263 error = posix_lock_file_wait(file, fl);
264 unlock_kernel();
265 return error;
266}
267
268/**
269 * nolock_punlock -
270 * @lockspace: the lockspace
271 * @name:
272 * @file:
273 * @fl:
274 *
275 * Returns: errno
276 */
277
278static int nolock_punlock(lm_lockspace_t *lockspace, struct lm_lockname *name,
279 struct file *file, struct file_lock *fl)
280{
281 int error;
282 lock_kernel();
283 error = posix_lock_file_wait(file, fl);
284 unlock_kernel();
285 return error;
286}
287
288/**
289 * nolock_recovery_done - reset the expired locks for a given jid
290 * @lockspace: the lockspace
291 * @jid: the jid
292 *
293 */
294
295static void nolock_recovery_done(lm_lockspace_t *lockspace, unsigned int jid,
296 unsigned int message)
297{
298}
299
300struct lm_lockops nolock_ops = {
301 .lm_proto_name = "lock_nolock",
302 .lm_mount = nolock_mount,
303 .lm_others_may_mount = nolock_others_may_mount,
304 .lm_unmount = nolock_unmount,
305 .lm_withdraw = nolock_withdraw,
306 .lm_get_lock = nolock_get_lock,
307 .lm_put_lock = nolock_put_lock,
308 .lm_lock = nolock_lock,
309 .lm_unlock = nolock_unlock,
310 .lm_cancel = nolock_cancel,
311 .lm_hold_lvb = nolock_hold_lvb,
312 .lm_unhold_lvb = nolock_unhold_lvb,
313 .lm_sync_lvb = nolock_sync_lvb,
314 .lm_plock_get = nolock_plock_get,
315 .lm_plock = nolock_plock,
316 .lm_punlock = nolock_punlock,
317 .lm_recovery_done = nolock_recovery_done,
318 .lm_owner = THIS_MODULE,
319};
320
321/**
322 * init_nolock - Initialize the nolock module
323 *
324 * Returns: 0 on success, -EXXX on failure
325 */
326
327int __init init_nolock(void)
328{
329 int error;
330
331 error = lm_register_proto(&nolock_ops);
332 if (error) {
333 printk("lock_nolock: can't register protocol: %d\n", error);
334 return error;
335 }
336
337 printk("Lock_Nolock (built %s %s) installed\n", __DATE__, __TIME__);
338 return 0;
339}
340
341/**
342 * exit_nolock - cleanup the nolock module
343 *
344 */
345
346void __exit exit_nolock(void)
347{
348 lm_unregister_proto(&nolock_ops);
349}
350
351module_init(init_nolock);
352module_exit(exit_nolock);
353
354MODULE_DESCRIPTION("GFS Nolock Locking Module");
355MODULE_AUTHOR("Red Hat, Inc.");
356MODULE_LICENSE("GPL");
357