diff options
context:
space:
mode:
author	Linus Torvalds <torvalds@linux-foundation.org>	2008-07-15 13:38:46 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-07-15 13:38:46 -0400
commit	38c46578ffd8ffbfec514c2a9876d527303322d6
tree	d55ad8a52048f70bc6ef51d388c0f21ff467155e /fs
parent	e7849f16c13476288fe4fbd420975e8456c75aa0
parent	4abaca17e758e3326c96ced88b2cd9b7b84922f6
Merge git://git.kernel.org/pub/scm/linux/kernel/git/steve/gfs2-2.6-nmw
* git://git.kernel.org/pub/scm/linux/kernel/git/steve/gfs2-2.6-nmw:
  [GFS2] Fix GFS2's use of do_div() in its quota calculations
  [GFS2] Remove unused declaration
  [GFS2] Remove support for unused and pointless flag
  [GFS2] Replace rgrp "recent list" with mru list
  [GFS2] Allow local DF locks when holding a cached EX glock
  [GFS2] Fix delayed demote race
  [GFS2] don't call permission()
  [GFS2] Fix module building
  [GFS2] Glock documentation
  [GFS2] Remove all_list from lock_dlm
  [GFS2] Remove obsolete conversion deadlock avoidance code
  [GFS2] Remove remote lock dropping code
  [GFS2] kernel panic mounting volume
  [GFS2] Revise readpage locking
  [GFS2] Fix ordering of args for list_add
  [GFS2] trivial sparse lock annotations
  [GFS2] No lock_nolock
  [GFS2] Fix ordering bug in lock_dlm
  [GFS2] Clean up the glock core
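[Editorial note: the first item above hinges on a detail of the kernel's 64-bit
division helper that is easy to get backwards: do_div(n, base) divides n in
place and returns the 32-bit remainder, not the quotient. A minimal sketch of
that contract follows; the helper function and values are illustrative, not
code from this merge.]

	#include <asm/div64.h>	/* do_div() */

	/* Hypothetical helper illustrating the do_div() contract the quota
	 * fix depends on: do_div(value, den) replaces the u64 'value' with
	 * value / den and returns the remainder.  Treating its return value
	 * as the quotient is the classic mistake.
	 */
	static u64 scale_quota_value(u64 value, u32 num, u32 den)
	{
		value *= num;		/* 64-bit multiply first...          */
		do_div(value, den);	/* ...then divide in place; the      */
					/* 32-bit remainder is discarded     */
		return value;		/* 'value' now holds the quotient    */
	}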
Diffstat (limited to 'fs')
-rw-r--r--	fs/gfs2/Kconfig	18
-rw-r--r--	fs/gfs2/Makefile	1
-rw-r--r--	fs/gfs2/gfs2.h	5
-rw-r--r--	fs/gfs2/glock.c	1643
-rw-r--r--	fs/gfs2/glock.h	11
-rw-r--r--	fs/gfs2/glops.c	70
-rw-r--r--	fs/gfs2/incore.h	38
-rw-r--r--	fs/gfs2/inode.c	11
-rw-r--r--	fs/gfs2/inode.h	2
-rw-r--r--	fs/gfs2/locking.c	52
-rw-r--r--	fs/gfs2/locking/dlm/lock.c	368
-rw-r--r--	fs/gfs2/locking/dlm/lock_dlm.h	18
-rw-r--r--	fs/gfs2/locking/dlm/mount.c	14
-rw-r--r--	fs/gfs2/locking/dlm/sysfs.c	13
-rw-r--r--	fs/gfs2/locking/dlm/thread.c	331
-rw-r--r--	fs/gfs2/locking/nolock/Makefile	3
-rw-r--r--	fs/gfs2/locking/nolock/main.c	238
-rw-r--r--	fs/gfs2/log.c	2
-rw-r--r--	fs/gfs2/log.h	2
-rw-r--r--	fs/gfs2/main.c	2
-rw-r--r--	fs/gfs2/meta_io.c	14
-rw-r--r--	fs/gfs2/meta_io.h	1
-rw-r--r--	fs/gfs2/ops_address.c	40
-rw-r--r--	fs/gfs2/ops_file.c	38
-rw-r--r--	fs/gfs2/ops_fstype.c	8
-rw-r--r--	fs/gfs2/ops_inode.c	25
-rw-r--r--	fs/gfs2/ops_super.c	4
-rw-r--r--	fs/gfs2/quota.c	2
-rw-r--r--	fs/gfs2/recovery.c	5
-rw-r--r--	fs/gfs2/rgrp.c	108
-rw-r--r--	fs/gfs2/super.c	4
-rw-r--r--	fs/gfs2/sys.c	16
32 files changed, 1161 insertions(+), 1946 deletions(-)
diff --git a/fs/gfs2/Kconfig b/fs/gfs2/Kconfig
index 7f7947e3dfbb..ab2f57e3fb87 100644
--- a/fs/gfs2/Kconfig
+++ b/fs/gfs2/Kconfig
@@ -14,23 +14,11 @@ config GFS2_FS
 	  GFS is perfect consistency -- changes made to the filesystem on one
 	  machine show up immediately on all other machines in the cluster.
 
-	  To use the GFS2 filesystem, you will need to enable one or more of
-	  the below locking modules. Documentation and utilities for GFS2 can
+	  To use the GFS2 filesystem in a cluster, you will need to enable
+	  the locking module below. Documentation and utilities for GFS2 can
 	  be found here: http://sources.redhat.com/cluster
 
-config GFS2_FS_LOCKING_NOLOCK
-	tristate "GFS2 \"nolock\" locking module"
-	depends on GFS2_FS
-	help
-	  Single node locking module for GFS2.
-
-	  Use this module if you want to use GFS2 on a single node without
-	  its clustering features. You can still take advantage of the
-	  large file support, and upgrade to running a full cluster later on
-	  if required.
-
-	  If you will only be using GFS2 in cluster mode, you do not need this
-	  module.
+	  The "nolock" lock module is now built in to GFS2 by default.
 
 config GFS2_FS_LOCKING_DLM
 	tristate "GFS2 DLM locking module"
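[Editorial note: the help text above is the user-visible half of this change;
the code half, visible in the glock.c hunks below, is that a built-in "nolock"
mount simply leaves the lock-module operations NULL and treats every request
as instantly granted. A minimal sketch of that dispatch pattern, with a
simplified name (compare gfs2_lm_lock()/gfs2_lm_get_lock() later in this
diff):]

	/* Sketch of the "NULL ops == built-in nolock" convention used
	 * throughout this merge; not a verbatim copy of the patch.
	 */
	static unsigned int lm_lock_sketch(struct gfs2_sbd *sdp, void *lock,
					   unsigned int cur_state,
					   unsigned int req_state,
					   unsigned int flags)
	{
		/* No lock module loaded: grant the request locally. */
		if (!sdp->sd_lockstruct.ls_ops->lm_lock)
			return req_state == LM_ST_UNLOCKED ? 0 : req_state;

		/* Otherwise hand the request to the DLM-backed module. */
		return sdp->sd_lockstruct.ls_ops->lm_lock(lock, cur_state,
							  req_state, flags);
	}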
diff --git a/fs/gfs2/Makefile b/fs/gfs2/Makefile
index e2350df02a07..ec65851ec80a 100644
--- a/fs/gfs2/Makefile
+++ b/fs/gfs2/Makefile
@@ -5,6 +5,5 @@ gfs2-y := acl.o bmap.o daemon.o dir.o eaops.o eattr.o glock.o \
 	ops_fstype.o ops_inode.o ops_super.o quota.o \
 	recovery.o rgrp.o super.o sys.o trans.o util.o
 
-obj-$(CONFIG_GFS2_FS_LOCKING_NOLOCK) += locking/nolock/
 obj-$(CONFIG_GFS2_FS_LOCKING_DLM) += locking/dlm/
 
diff --git a/fs/gfs2/gfs2.h b/fs/gfs2/gfs2.h
index 3bb11c0f8b56..ef606e3a5cf4 100644
--- a/fs/gfs2/gfs2.h
+++ b/fs/gfs2/gfs2.h
@@ -16,11 +16,6 @@ enum {
 };
 
 enum {
-	NO_WAIT = 0,
-	WAIT = 1,
-};
-
-enum {
 	NO_FORCE = 0,
 	FORCE = 1,
 };
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index d636b3e80f5d..13391e546616 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -45,21 +45,19 @@ struct gfs2_gl_hash_bucket {
 	struct hlist_head hb_list;
 };
 
-struct glock_iter {
+struct gfs2_glock_iter {
 	int hash;		/* hash bucket index         */
 	struct gfs2_sbd *sdp;	/* incore superblock         */
 	struct gfs2_glock *gl;	/* current glock struct      */
-	struct seq_file *seq;	/* sequence file for debugfs */
 	char string[512];	/* scratch space             */
 };
 
 typedef void (*glock_examiner) (struct gfs2_glock * gl);
 
 static int gfs2_dump_lockstate(struct gfs2_sbd *sdp);
-static int dump_glock(struct glock_iter *gi, struct gfs2_glock *gl);
-static void gfs2_glock_xmote_th(struct gfs2_glock *gl, struct gfs2_holder *gh);
-static void gfs2_glock_drop_th(struct gfs2_glock *gl);
-static void run_queue(struct gfs2_glock *gl);
+static int __dump_glock(struct seq_file *seq, const struct gfs2_glock *gl);
+#define GLOCK_BUG_ON(gl,x) do { if (unlikely(x)) { __dump_glock(NULL, gl); BUG(); } } while(0)
+static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target);
 
 static DECLARE_RWSEM(gfs2_umount_flush_sem);
 static struct dentry *gfs2_root;
@@ -123,33 +121,6 @@ static inline rwlock_t *gl_lock_addr(unsigned int x)
 #endif
 
 /**
- * relaxed_state_ok - is a requested lock compatible with the current lock mode?
- * @actual: the current state of the lock
- * @requested: the lock state that was requested by the caller
- * @flags: the modifier flags passed in by the caller
- *
- * Returns: 1 if the locks are compatible, 0 otherwise
- */
-
-static inline int relaxed_state_ok(unsigned int actual, unsigned requested,
-				   int flags)
-{
-	if (actual == requested)
-		return 1;
-
-	if (flags & GL_EXACT)
-		return 0;
-
-	if (actual == LM_ST_EXCLUSIVE && requested == LM_ST_SHARED)
-		return 1;
-
-	if (actual != LM_ST_UNLOCKED && (flags & LM_FLAG_ANY))
-		return 1;
-
-	return 0;
-}
-
-/**
  * gl_hash() - Turn glock number into hash bucket number
  * @lock: The glock number
  *
@@ -182,7 +153,7 @@ static void glock_free(struct gfs2_glock *gl)
 	struct gfs2_sbd *sdp = gl->gl_sbd;
 	struct inode *aspace = gl->gl_aspace;
 
-	if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
+	if (sdp->sd_lockstruct.ls_ops->lm_put_lock)
 		sdp->sd_lockstruct.ls_ops->lm_put_lock(gl->gl_lock);
 
 	if (aspace)
@@ -211,17 +182,14 @@ static void gfs2_glock_hold(struct gfs2_glock *gl)
 int gfs2_glock_put(struct gfs2_glock *gl)
 {
 	int rv = 0;
-	struct gfs2_sbd *sdp = gl->gl_sbd;
 
 	write_lock(gl_lock_addr(gl->gl_hash));
 	if (atomic_dec_and_test(&gl->gl_ref)) {
 		hlist_del(&gl->gl_list);
 		write_unlock(gl_lock_addr(gl->gl_hash));
-		gfs2_assert(sdp, gl->gl_state == LM_ST_UNLOCKED);
-		gfs2_assert(sdp, list_empty(&gl->gl_reclaim));
-		gfs2_assert(sdp, list_empty(&gl->gl_holders));
-		gfs2_assert(sdp, list_empty(&gl->gl_waiters1));
-		gfs2_assert(sdp, list_empty(&gl->gl_waiters3));
+		GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_UNLOCKED);
+		GLOCK_BUG_ON(gl, !list_empty(&gl->gl_reclaim));
+		GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
 		glock_free(gl);
 		rv = 1;
 		goto out;
@@ -281,22 +249,401 @@ static struct gfs2_glock *gfs2_glock_find(const struct gfs2_sbd *sdp,
 	return gl;
 }
 
+/**
+ * may_grant - check if its ok to grant a new lock
+ * @gl: The glock
+ * @gh: The lock request which we wish to grant
+ *
+ * Returns: true if its ok to grant the lock
+ */
+
+static inline int may_grant(const struct gfs2_glock *gl, const struct gfs2_holder *gh)
+{
+	const struct gfs2_holder *gh_head = list_entry(gl->gl_holders.next, const struct gfs2_holder, gh_list);
+	if ((gh->gh_state == LM_ST_EXCLUSIVE ||
+	     gh_head->gh_state == LM_ST_EXCLUSIVE) && gh != gh_head)
+		return 0;
+	if (gl->gl_state == gh->gh_state)
+		return 1;
+	if (gh->gh_flags & GL_EXACT)
+		return 0;
+	if (gl->gl_state == LM_ST_EXCLUSIVE) {
+		if (gh->gh_state == LM_ST_SHARED && gh_head->gh_state == LM_ST_SHARED)
+			return 1;
+		if (gh->gh_state == LM_ST_DEFERRED && gh_head->gh_state == LM_ST_DEFERRED)
+			return 1;
+	}
+	if (gl->gl_state != LM_ST_UNLOCKED && (gh->gh_flags & LM_FLAG_ANY))
+		return 1;
+	return 0;
+}
+
+static void gfs2_holder_wake(struct gfs2_holder *gh)
+{
+	clear_bit(HIF_WAIT, &gh->gh_iflags);
+	smp_mb__after_clear_bit();
+	wake_up_bit(&gh->gh_iflags, HIF_WAIT);
+}
+
+/**
+ * do_promote - promote as many requests as possible on the current queue
+ * @gl: The glock
+ *
+ * Returns: true if there is a blocked holder at the head of the list
+ */
+
+static int do_promote(struct gfs2_glock *gl)
+{
+	const struct gfs2_glock_operations *glops = gl->gl_ops;
+	struct gfs2_holder *gh, *tmp;
+	int ret;
+
+restart:
+	list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
+		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
+			continue;
+		if (may_grant(gl, gh)) {
+			if (gh->gh_list.prev == &gl->gl_holders &&
+			    glops->go_lock) {
+				spin_unlock(&gl->gl_spin);
+				/* FIXME: eliminate this eventually */
+				ret = glops->go_lock(gh);
+				spin_lock(&gl->gl_spin);
+				if (ret) {
+					gh->gh_error = ret;
+					list_del_init(&gh->gh_list);
+					gfs2_holder_wake(gh);
+					goto restart;
+				}
+				set_bit(HIF_HOLDER, &gh->gh_iflags);
+				gfs2_holder_wake(gh);
+				goto restart;
+			}
+			set_bit(HIF_HOLDER, &gh->gh_iflags);
+			gfs2_holder_wake(gh);
+			continue;
+		}
+		if (gh->gh_list.prev == &gl->gl_holders)
+			return 1;
+		break;
+	}
+	return 0;
+}
+
+/**
+ * do_error - Something unexpected has happened during a lock request
+ *
+ */
+
+static inline void do_error(struct gfs2_glock *gl, const int ret)
+{
+	struct gfs2_holder *gh, *tmp;
+
+	list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
+		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
+			continue;
+		if (ret & LM_OUT_ERROR)
+			gh->gh_error = -EIO;
+		else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))
+			gh->gh_error = GLR_TRYFAILED;
+		else
+			continue;
+		list_del_init(&gh->gh_list);
+		gfs2_holder_wake(gh);
+	}
+}
+
+/**
+ * find_first_waiter - find the first gh that's waiting for the glock
+ * @gl: the glock
+ */
+
+static inline struct gfs2_holder *find_first_waiter(const struct gfs2_glock *gl)
+{
+	struct gfs2_holder *gh;
+
+	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
+		if (!test_bit(HIF_HOLDER, &gh->gh_iflags))
+			return gh;
+	}
+	return NULL;
+}
+
+/**
+ * state_change - record that the glock is now in a different state
+ * @gl: the glock
+ * @new_state the new state
+ *
+ */
+
+static void state_change(struct gfs2_glock *gl, unsigned int new_state)
+{
+	int held1, held2;
+
+	held1 = (gl->gl_state != LM_ST_UNLOCKED);
+	held2 = (new_state != LM_ST_UNLOCKED);
+
+	if (held1 != held2) {
+		if (held2)
+			gfs2_glock_hold(gl);
+		else
+			gfs2_glock_put(gl);
+	}
+
+	gl->gl_state = new_state;
+	gl->gl_tchange = jiffies;
+}
+
+static void gfs2_demote_wake(struct gfs2_glock *gl)
+{
+	gl->gl_demote_state = LM_ST_EXCLUSIVE;
+	clear_bit(GLF_DEMOTE, &gl->gl_flags);
+	smp_mb__after_clear_bit();
+	wake_up_bit(&gl->gl_flags, GLF_DEMOTE);
+}
+
+/**
+ * finish_xmote - The DLM has replied to one of our lock requests
+ * @gl: The glock
+ * @ret: The status from the DLM
+ *
+ */
+
+static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
+{
+	const struct gfs2_glock_operations *glops = gl->gl_ops;
+	struct gfs2_holder *gh;
+	unsigned state = ret & LM_OUT_ST_MASK;
+
+	spin_lock(&gl->gl_spin);
+	state_change(gl, state);
+	gh = find_first_waiter(gl);
+
+	/* Demote to UN request arrived during demote to SH or DF */
+	if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) &&
+	    state != LM_ST_UNLOCKED && gl->gl_demote_state == LM_ST_UNLOCKED)
+		gl->gl_target = LM_ST_UNLOCKED;
+
+	/* Check for state != intended state */
+	if (unlikely(state != gl->gl_target)) {
+		if (gh && !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) {
+			/* move to back of queue and try next entry */
+			if (ret & LM_OUT_CANCELED) {
+				if ((gh->gh_flags & LM_FLAG_PRIORITY) == 0)
+					list_move_tail(&gh->gh_list, &gl->gl_holders);
+				gh = find_first_waiter(gl);
+				gl->gl_target = gh->gh_state;
+				goto retry;
+			}
+			/* Some error or failed "try lock" - report it */
+			if ((ret & LM_OUT_ERROR) ||
+			    (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) {
+				gl->gl_target = gl->gl_state;
+				do_error(gl, ret);
+				goto out;
+			}
+		}
+		switch(state) {
+		/* Unlocked due to conversion deadlock, try again */
+		case LM_ST_UNLOCKED:
+retry:
+			do_xmote(gl, gh, gl->gl_target);
+			break;
+		/* Conversion fails, unlock and try again */
+		case LM_ST_SHARED:
+		case LM_ST_DEFERRED:
+			do_xmote(gl, gh, LM_ST_UNLOCKED);
+			break;
+		default: /* Everything else */
+			printk(KERN_ERR "GFS2: wanted %u got %u\n", gl->gl_target, state);
+			GLOCK_BUG_ON(gl, 1);
+		}
+		spin_unlock(&gl->gl_spin);
+		gfs2_glock_put(gl);
+		return;
+	}
+
+	/* Fast path - we got what we asked for */
+	if (test_and_clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags))
+		gfs2_demote_wake(gl);
+	if (state != LM_ST_UNLOCKED) {
+		if (glops->go_xmote_bh) {
+			int rv;
+			spin_unlock(&gl->gl_spin);
+			rv = glops->go_xmote_bh(gl, gh);
+			if (rv == -EAGAIN)
+				return;
+			spin_lock(&gl->gl_spin);
+			if (rv) {
+				do_error(gl, rv);
+				goto out;
+			}
+		}
+		do_promote(gl);
+	}
+out:
+	clear_bit(GLF_LOCK, &gl->gl_flags);
+	spin_unlock(&gl->gl_spin);
+	gfs2_glock_put(gl);
+}
+
+static unsigned int gfs2_lm_lock(struct gfs2_sbd *sdp, void *lock,
+				 unsigned int cur_state, unsigned int req_state,
+				 unsigned int flags)
+{
+	int ret = LM_OUT_ERROR;
+
+	if (!sdp->sd_lockstruct.ls_ops->lm_lock)
+		return req_state == LM_ST_UNLOCKED ? 0 : req_state;
+
+	if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
+		ret = sdp->sd_lockstruct.ls_ops->lm_lock(lock, cur_state,
+							 req_state, flags);
+	return ret;
+}
+
+/**
+ * do_xmote - Calls the DLM to change the state of a lock
+ * @gl: The lock state
+ * @gh: The holder (only for promotes)
+ * @target: The target lock state
+ *
+ */
+
+static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target)
+{
+	const struct gfs2_glock_operations *glops = gl->gl_ops;
+	struct gfs2_sbd *sdp = gl->gl_sbd;
+	unsigned int lck_flags = gh ? gh->gh_flags : 0;
+	int ret;
+
+	lck_flags &= (LM_FLAG_TRY | LM_FLAG_TRY_1CB | LM_FLAG_NOEXP |
+		      LM_FLAG_PRIORITY);
+	BUG_ON(gl->gl_state == target);
+	BUG_ON(gl->gl_state == gl->gl_target);
+	if ((target == LM_ST_UNLOCKED || target == LM_ST_DEFERRED) &&
+	    glops->go_inval) {
+		set_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
+		do_error(gl, 0); /* Fail queued try locks */
+	}
+	spin_unlock(&gl->gl_spin);
+	if (glops->go_xmote_th)
+		glops->go_xmote_th(gl);
+	if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
+		glops->go_inval(gl, target == LM_ST_DEFERRED ? 0 : DIO_METADATA);
+	clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
+
+	gfs2_glock_hold(gl);
+	if (target != LM_ST_UNLOCKED && (gl->gl_state == LM_ST_SHARED ||
+	    gl->gl_state == LM_ST_DEFERRED) &&
+	    !(lck_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
+		lck_flags |= LM_FLAG_TRY_1CB;
+	ret = gfs2_lm_lock(sdp, gl->gl_lock, gl->gl_state, target, lck_flags);
+
+	if (!(ret & LM_OUT_ASYNC)) {
+		finish_xmote(gl, ret);
+		gfs2_glock_hold(gl);
+		if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
+			gfs2_glock_put(gl);
+	} else {
+		GLOCK_BUG_ON(gl, ret != LM_OUT_ASYNC);
+	}
+	spin_lock(&gl->gl_spin);
+}
+
+/**
+ * find_first_holder - find the first "holder" gh
+ * @gl: the glock
+ */
+
+static inline struct gfs2_holder *find_first_holder(const struct gfs2_glock *gl)
+{
+	struct gfs2_holder *gh;
+
+	if (!list_empty(&gl->gl_holders)) {
+		gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
+		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
+			return gh;
+	}
+	return NULL;
+}
+
+/**
+ * run_queue - do all outstanding tasks related to a glock
+ * @gl: The glock in question
+ * @nonblock: True if we must not block in run_queue
+ *
+ */
+
+static void run_queue(struct gfs2_glock *gl, const int nonblock)
+{
+	struct gfs2_holder *gh = NULL;
+
+	if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
+		return;
+
+	GLOCK_BUG_ON(gl, test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags));
+
+	if (test_bit(GLF_DEMOTE, &gl->gl_flags) &&
+	    gl->gl_demote_state != gl->gl_state) {
+		if (find_first_holder(gl))
+			goto out;
+		if (nonblock)
+			goto out_sched;
+		set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
+		GLOCK_BUG_ON(gl, gl->gl_demote_state == LM_ST_EXCLUSIVE);
+		gl->gl_target = gl->gl_demote_state;
+	} else {
+		if (test_bit(GLF_DEMOTE, &gl->gl_flags))
+			gfs2_demote_wake(gl);
+		if (do_promote(gl) == 0)
+			goto out;
+		gh = find_first_waiter(gl);
+		gl->gl_target = gh->gh_state;
+		if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
+			do_error(gl, 0); /* Fail queued try locks */
+	}
+	do_xmote(gl, gh, gl->gl_target);
+	return;
+
+out_sched:
+	gfs2_glock_hold(gl);
+	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
+		gfs2_glock_put(gl);
+out:
+	clear_bit(GLF_LOCK, &gl->gl_flags);
+}
+
 static void glock_work_func(struct work_struct *work)
 {
+	unsigned long delay = 0;
 	struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work);
 
+	if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags))
+		finish_xmote(gl, gl->gl_reply);
 	spin_lock(&gl->gl_spin);
-	if (test_and_clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags))
-		set_bit(GLF_DEMOTE, &gl->gl_flags);
-	run_queue(gl);
+	if (test_and_clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
+	    gl->gl_state != LM_ST_UNLOCKED &&
+	    gl->gl_demote_state != LM_ST_EXCLUSIVE) {
+		unsigned long holdtime, now = jiffies;
+		holdtime = gl->gl_tchange + gl->gl_ops->go_min_hold_time;
+		if (time_before(now, holdtime))
+			delay = holdtime - now;
+		set_bit(delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE, &gl->gl_flags);
+	}
+	run_queue(gl, 0);
 	spin_unlock(&gl->gl_spin);
-	gfs2_glock_put(gl);
+	if (!delay ||
+	    queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
+		gfs2_glock_put(gl);
 }
 
 static int gfs2_lm_get_lock(struct gfs2_sbd *sdp, struct lm_lockname *name,
 			    void **lockp)
 {
 	int error = -EIO;
+	if (!sdp->sd_lockstruct.ls_ops->lm_get_lock)
+		return 0;
 	if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
 		error = sdp->sd_lockstruct.ls_ops->lm_get_lock(
 				sdp->sd_lockstruct.ls_lockspace, name, lockp);
@@ -342,12 +689,10 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
 	gl->gl_name = name;
 	atomic_set(&gl->gl_ref, 1);
 	gl->gl_state = LM_ST_UNLOCKED;
+	gl->gl_target = LM_ST_UNLOCKED;
 	gl->gl_demote_state = LM_ST_EXCLUSIVE;
 	gl->gl_hash = hash;
-	gl->gl_owner_pid = NULL;
-	gl->gl_ip = 0;
 	gl->gl_ops = glops;
-	gl->gl_req_gh = NULL;
 	gl->gl_stamp = jiffies;
 	gl->gl_tchange = jiffies;
 	gl->gl_object = NULL;
@@ -447,13 +792,6 @@ void gfs2_holder_uninit(struct gfs2_holder *gh)
 	gh->gh_ip = 0;
 }
 
-static void gfs2_holder_wake(struct gfs2_holder *gh)
-{
-	clear_bit(HIF_WAIT, &gh->gh_iflags);
-	smp_mb__after_clear_bit();
-	wake_up_bit(&gh->gh_iflags, HIF_WAIT);
-}
-
 static int just_schedule(void *word)
 {
 	schedule();
@@ -466,14 +804,6 @@ static void wait_on_holder(struct gfs2_holder *gh)
 	wait_on_bit(&gh->gh_iflags, HIF_WAIT, just_schedule, TASK_UNINTERRUPTIBLE);
 }
 
-static void gfs2_demote_wake(struct gfs2_glock *gl)
-{
-	gl->gl_demote_state = LM_ST_EXCLUSIVE;
-	clear_bit(GLF_DEMOTE, &gl->gl_flags);
-	smp_mb__after_clear_bit();
-	wake_up_bit(&gl->gl_flags, GLF_DEMOTE);
-}
-
 static void wait_on_demote(struct gfs2_glock *gl)
 {
 	might_sleep();
@@ -481,217 +811,6 @@ static void wait_on_demote(struct gfs2_glock *gl)
 }
 
 /**
- * rq_mutex - process a mutex request in the queue
- * @gh: the glock holder
- *
- * Returns: 1 if the queue is blocked
- */
-
-static int rq_mutex(struct gfs2_holder *gh)
-{
-	struct gfs2_glock *gl = gh->gh_gl;
-
-	list_del_init(&gh->gh_list);
-	/*  gh->gh_error never examined.  */
-	set_bit(GLF_LOCK, &gl->gl_flags);
-	clear_bit(HIF_WAIT, &gh->gh_iflags);
-	smp_mb();
-	wake_up_bit(&gh->gh_iflags, HIF_WAIT);
-
-	return 1;
-}
-
-/**
- * rq_promote - process a promote request in the queue
- * @gh: the glock holder
- *
- * Acquire a new inter-node lock, or change a lock state to more restrictive.
- *
- * Returns: 1 if the queue is blocked
- */
-
-static int rq_promote(struct gfs2_holder *gh)
-{
-	struct gfs2_glock *gl = gh->gh_gl;
-
-	if (!relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
-		if (list_empty(&gl->gl_holders)) {
-			gl->gl_req_gh = gh;
-			set_bit(GLF_LOCK, &gl->gl_flags);
-			spin_unlock(&gl->gl_spin);
-			gfs2_glock_xmote_th(gh->gh_gl, gh);
-			spin_lock(&gl->gl_spin);
-		}
-		return 1;
-	}
-
-	if (list_empty(&gl->gl_holders)) {
-		set_bit(HIF_FIRST, &gh->gh_iflags);
-		set_bit(GLF_LOCK, &gl->gl_flags);
-	} else {
-		struct gfs2_holder *next_gh;
-		if (gh->gh_state == LM_ST_EXCLUSIVE)
-			return 1;
-		next_gh = list_entry(gl->gl_holders.next, struct gfs2_holder,
-				     gh_list);
-		if (next_gh->gh_state == LM_ST_EXCLUSIVE)
-			return 1;
-	}
-
-	list_move_tail(&gh->gh_list, &gl->gl_holders);
-	gh->gh_error = 0;
-	set_bit(HIF_HOLDER, &gh->gh_iflags);
-
-	gfs2_holder_wake(gh);
-
-	return 0;
-}
-
-/**
- * rq_demote - process a demote request in the queue
- * @gh: the glock holder
- *
- * Returns: 1 if the queue is blocked
- */
-
-static int rq_demote(struct gfs2_glock *gl)
-{
-	if (!list_empty(&gl->gl_holders))
-		return 1;
-
-	if (gl->gl_state == gl->gl_demote_state ||
-	    gl->gl_state == LM_ST_UNLOCKED) {
-		gfs2_demote_wake(gl);
-		return 0;
-	}
-
-	set_bit(GLF_LOCK, &gl->gl_flags);
-	set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
-
-	if (gl->gl_demote_state == LM_ST_UNLOCKED ||
-	    gl->gl_state != LM_ST_EXCLUSIVE) {
-		spin_unlock(&gl->gl_spin);
-		gfs2_glock_drop_th(gl);
-	} else {
-		spin_unlock(&gl->gl_spin);
-		gfs2_glock_xmote_th(gl, NULL);
-	}
-
-	spin_lock(&gl->gl_spin);
-	clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
-
-	return 0;
-}
-
-/**
- * run_queue - process holder structures on a glock
- * @gl: the glock
- *
- */
-static void run_queue(struct gfs2_glock *gl)
-{
-	struct gfs2_holder *gh;
-	int blocked = 1;
-
-	for (;;) {
-		if (test_bit(GLF_LOCK, &gl->gl_flags))
-			break;
-
-		if (!list_empty(&gl->gl_waiters1)) {
-			gh = list_entry(gl->gl_waiters1.next,
-					struct gfs2_holder, gh_list);
-			blocked = rq_mutex(gh);
-		} else if (test_bit(GLF_DEMOTE, &gl->gl_flags)) {
-			blocked = rq_demote(gl);
-			if (test_bit(GLF_WAITERS2, &gl->gl_flags) &&
-			    !blocked) {
-				set_bit(GLF_DEMOTE, &gl->gl_flags);
-				gl->gl_demote_state = LM_ST_UNLOCKED;
-			}
-			clear_bit(GLF_WAITERS2, &gl->gl_flags);
-		} else if (!list_empty(&gl->gl_waiters3)) {
-			gh = list_entry(gl->gl_waiters3.next,
-					struct gfs2_holder, gh_list);
-			blocked = rq_promote(gh);
-		} else
-			break;
-
-		if (blocked)
-			break;
-	}
-}
-
-/**
- * gfs2_glmutex_lock - acquire a local lock on a glock
- * @gl: the glock
- *
- * Gives caller exclusive access to manipulate a glock structure.
- */
-
-static void gfs2_glmutex_lock(struct gfs2_glock *gl)
-{
-	spin_lock(&gl->gl_spin);
-	if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
-		struct gfs2_holder gh;
-
-		gfs2_holder_init(gl, 0, 0, &gh);
-		set_bit(HIF_WAIT, &gh.gh_iflags);
-		list_add_tail(&gh.gh_list, &gl->gl_waiters1);
-		spin_unlock(&gl->gl_spin);
-		wait_on_holder(&gh);
-		gfs2_holder_uninit(&gh);
-	} else {
-		gl->gl_owner_pid = get_pid(task_pid(current));
-		gl->gl_ip = (unsigned long)__builtin_return_address(0);
-		spin_unlock(&gl->gl_spin);
-	}
-}
-
-/**
- * gfs2_glmutex_trylock - try to acquire a local lock on a glock
- * @gl: the glock
- *
- * Returns: 1 if the glock is acquired
- */
-
-static int gfs2_glmutex_trylock(struct gfs2_glock *gl)
-{
-	int acquired = 1;
-
-	spin_lock(&gl->gl_spin);
-	if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
-		acquired = 0;
-	} else {
-		gl->gl_owner_pid = get_pid(task_pid(current));
-		gl->gl_ip = (unsigned long)__builtin_return_address(0);
-	}
-	spin_unlock(&gl->gl_spin);
-
-	return acquired;
-}
-
-/**
- * gfs2_glmutex_unlock - release a local lock on a glock
- * @gl: the glock
- *
- */
-
-static void gfs2_glmutex_unlock(struct gfs2_glock *gl)
-{
-	struct pid *pid;
-
-	spin_lock(&gl->gl_spin);
-	clear_bit(GLF_LOCK, &gl->gl_flags);
-	pid = gl->gl_owner_pid;
-	gl->gl_owner_pid = NULL;
-	gl->gl_ip = 0;
-	run_queue(gl);
-	spin_unlock(&gl->gl_spin);
-
-	put_pid(pid);
-}
-
-/**
  * handle_callback - process a demote request
  * @gl: the glock
  * @state: the state the caller wants us to change to
@@ -705,398 +824,45 @@ static void handle_callback(struct gfs2_glock *gl, unsigned int state,
 {
 	int bit = delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE;
 
-	spin_lock(&gl->gl_spin);
 	set_bit(bit, &gl->gl_flags);
 	if (gl->gl_demote_state == LM_ST_EXCLUSIVE) {
 		gl->gl_demote_state = state;
 		gl->gl_demote_time = jiffies;
 		if (remote && gl->gl_ops->go_type == LM_TYPE_IOPEN &&
-		    gl->gl_object) {
+		    gl->gl_object)
 			gfs2_glock_schedule_for_reclaim(gl);
-			spin_unlock(&gl->gl_spin);
-			return;
-		}
 	} else if (gl->gl_demote_state != LM_ST_UNLOCKED &&
-		   gl->gl_demote_state != state) {
-		if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags))
-			set_bit(GLF_WAITERS2, &gl->gl_flags);
-		else
-			gl->gl_demote_state = LM_ST_UNLOCKED;
+			gl->gl_demote_state != state) {
+		gl->gl_demote_state = LM_ST_UNLOCKED;
 	}
-	spin_unlock(&gl->gl_spin);
 }
 
 /**
- * drop_bh - Called after a lock module unlock completes
- * @gl: the glock
- * @ret: the return status
- *
- * Doesn't wake up the process waiting on the struct gfs2_holder (if any)
- * Doesn't drop the reference on the glock the top half took out
- *
- */
-
-static void drop_bh(struct gfs2_glock *gl, unsigned int ret)
-{
-	struct gfs2_sbd *sdp = gl->gl_sbd;
-	struct gfs2_holder *gh = gl->gl_req_gh;
-
-	gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
-	gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
-	gfs2_assert_warn(sdp, !ret);
-
-	state_change(gl, LM_ST_UNLOCKED);
-
-	if (test_and_clear_bit(GLF_CONV_DEADLK, &gl->gl_flags)) {
-		spin_lock(&gl->gl_spin);
-		gh->gh_error = 0;
-		spin_unlock(&gl->gl_spin);
-		gfs2_glock_xmote_th(gl, gl->gl_req_gh);
-		gfs2_glock_put(gl);
-		return;
-	}
-
-	spin_lock(&gl->gl_spin);
-	gfs2_demote_wake(gl);
-	clear_bit(GLF_LOCK, &gl->gl_flags);
-	spin_unlock(&gl->gl_spin);
-	gfs2_glock_put(gl);
-}
-
-/**
- * xmote_bh - Called after the lock module is done acquiring a lock
- * @gl: The glock in question
- * @ret: the int returned from the lock module
- *
- */
-
-static void xmote_bh(struct gfs2_glock *gl, unsigned int ret)
-{
-	struct gfs2_sbd *sdp = gl->gl_sbd;
-	const struct gfs2_glock_operations *glops = gl->gl_ops;
-	struct gfs2_holder *gh = gl->gl_req_gh;
-	int op_done = 1;
-
-	if (!gh && (ret & LM_OUT_ST_MASK) == LM_ST_UNLOCKED) {
-		drop_bh(gl, ret);
-		return;
-	}
-
-	gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
-	gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
-	gfs2_assert_warn(sdp, !(ret & LM_OUT_ASYNC));
-
-	state_change(gl, ret & LM_OUT_ST_MASK);
-
-	/* Deal with each possible exit condition */
-
-	if (!gh) {
-		gl->gl_stamp = jiffies;
-		if (ret & LM_OUT_CANCELED) {
-			op_done = 0;
-		} else {
-			spin_lock(&gl->gl_spin);
-			if (gl->gl_state != gl->gl_demote_state) {
-				spin_unlock(&gl->gl_spin);
-				gfs2_glock_drop_th(gl);
-				gfs2_glock_put(gl);
-				return;
-			}
-			gfs2_demote_wake(gl);
-			spin_unlock(&gl->gl_spin);
-		}
-	} else {
-		spin_lock(&gl->gl_spin);
-		if (ret & LM_OUT_CONV_DEADLK) {
-			gh->gh_error = 0;
-			set_bit(GLF_CONV_DEADLK, &gl->gl_flags);
-			spin_unlock(&gl->gl_spin);
-			gfs2_glock_drop_th(gl);
-			gfs2_glock_put(gl);
-			return;
-		}
-		list_del_init(&gh->gh_list);
-		gh->gh_error = -EIO;
-		if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
-			goto out;
-		gh->gh_error = GLR_CANCELED;
-		if (ret & LM_OUT_CANCELED)
-			goto out;
-		if (relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
-			list_add_tail(&gh->gh_list, &gl->gl_holders);
-			gh->gh_error = 0;
-			set_bit(HIF_HOLDER, &gh->gh_iflags);
-			set_bit(HIF_FIRST, &gh->gh_iflags);
-			op_done = 0;
-			goto out;
-		}
-		gh->gh_error = GLR_TRYFAILED;
-		if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))
-			goto out;
-		gh->gh_error = -EINVAL;
-		if (gfs2_assert_withdraw(sdp, 0) == -1)
-			fs_err(sdp, "ret = 0x%.8X\n", ret);
-out:
-		spin_unlock(&gl->gl_spin);
-	}
-
-	if (glops->go_xmote_bh)
-		glops->go_xmote_bh(gl);
-
-	if (op_done) {
-		spin_lock(&gl->gl_spin);
-		gl->gl_req_gh = NULL;
-		clear_bit(GLF_LOCK, &gl->gl_flags);
-		spin_unlock(&gl->gl_spin);
-	}
-
-	gfs2_glock_put(gl);
-
-	if (gh)
-		gfs2_holder_wake(gh);
-}
-
-static unsigned int gfs2_lm_lock(struct gfs2_sbd *sdp, void *lock,
-				 unsigned int cur_state, unsigned int req_state,
-				 unsigned int flags)
-{
-	int ret = 0;
-	if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
-		ret = sdp->sd_lockstruct.ls_ops->lm_lock(lock, cur_state,
-							 req_state, flags);
-	return ret;
-}
-
-/**
- * gfs2_glock_xmote_th - Call into the lock module to acquire or change a glock
- * @gl: The glock in question
- * @state: the requested state
- * @flags: modifier flags to the lock call
- *
- */
-
-static void gfs2_glock_xmote_th(struct gfs2_glock *gl, struct gfs2_holder *gh)
-{
-	struct gfs2_sbd *sdp = gl->gl_sbd;
-	int flags = gh ? gh->gh_flags : 0;
-	unsigned state = gh ? gh->gh_state : gl->gl_demote_state;
-	const struct gfs2_glock_operations *glops = gl->gl_ops;
-	int lck_flags = flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB |
-				 LM_FLAG_NOEXP | LM_FLAG_ANY |
-				 LM_FLAG_PRIORITY);
-	unsigned int lck_ret;
-
-	if (glops->go_xmote_th)
-		glops->go_xmote_th(gl);
-	if (state == LM_ST_DEFERRED && glops->go_inval)
-		glops->go_inval(gl, DIO_METADATA);
-
-	gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
-	gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
-	gfs2_assert_warn(sdp, state != LM_ST_UNLOCKED);
-	gfs2_assert_warn(sdp, state != gl->gl_state);
-
-	gfs2_glock_hold(gl);
-
-	lck_ret = gfs2_lm_lock(sdp, gl->gl_lock, gl->gl_state, state, lck_flags);
-
-	if (gfs2_assert_withdraw(sdp, !(lck_ret & LM_OUT_ERROR)))
-		return;
-
-	if (lck_ret & LM_OUT_ASYNC)
-		gfs2_assert_warn(sdp, lck_ret == LM_OUT_ASYNC);
-	else
-		xmote_bh(gl, lck_ret);
-}
-
-static unsigned int gfs2_lm_unlock(struct gfs2_sbd *sdp, void *lock,
-				   unsigned int cur_state)
-{
-	int ret = 0;
-	if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
-		ret = sdp->sd_lockstruct.ls_ops->lm_unlock(lock, cur_state);
-	return ret;
-}
-
-/**
- * gfs2_glock_drop_th - call into the lock module to unlock a lock
- * @gl: the glock
- *
- */
-
-static void gfs2_glock_drop_th(struct gfs2_glock *gl)
-{
-	struct gfs2_sbd *sdp = gl->gl_sbd;
-	const struct gfs2_glock_operations *glops = gl->gl_ops;
-	unsigned int ret;
-
-	if (glops->go_xmote_th)
-		glops->go_xmote_th(gl);
-	if (glops->go_inval)
-		glops->go_inval(gl, DIO_METADATA);
-
-	gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
-	gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
-	gfs2_assert_warn(sdp, gl->gl_state != LM_ST_UNLOCKED);
-
-	gfs2_glock_hold(gl);
-
-	ret = gfs2_lm_unlock(sdp, gl->gl_lock, gl->gl_state);
-
-	if (gfs2_assert_withdraw(sdp, !(ret & LM_OUT_ERROR)))
-		return;
-
-	if (!ret)
-		drop_bh(gl, ret);
-	else
-		gfs2_assert_warn(sdp, ret == LM_OUT_ASYNC);
-}
-
-/**
- * do_cancels - cancel requests for locks stuck waiting on an expire flag
- * @gh: the LM_FLAG_PRIORITY holder waiting to acquire the lock
- *
- * Don't cancel GL_NOCANCEL requests.
- */
-
-static void do_cancels(struct gfs2_holder *gh)
-{
-	struct gfs2_glock *gl = gh->gh_gl;
-	struct gfs2_sbd *sdp = gl->gl_sbd;
-
-	spin_lock(&gl->gl_spin);
-
-	while (gl->gl_req_gh != gh &&
-	       !test_bit(HIF_HOLDER, &gh->gh_iflags) &&
-	       !list_empty(&gh->gh_list)) {
-		if (!(gl->gl_req_gh && (gl->gl_req_gh->gh_flags & GL_NOCANCEL))) {
-			spin_unlock(&gl->gl_spin);
-			if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
-				sdp->sd_lockstruct.ls_ops->lm_cancel(gl->gl_lock);
-			msleep(100);
-			spin_lock(&gl->gl_spin);
-		} else {
-			spin_unlock(&gl->gl_spin);
-			msleep(100);
-			spin_lock(&gl->gl_spin);
-		}
-	}
-
-	spin_unlock(&gl->gl_spin);
-}
-
-/**
- * glock_wait_internal - wait on a glock acquisition
+ * gfs2_glock_wait - wait on a glock acquisition
  * @gh: the glock holder
  *
  * Returns: 0 on success
  */
 
-static int glock_wait_internal(struct gfs2_holder *gh)
+int gfs2_glock_wait(struct gfs2_holder *gh)
 {
-	struct gfs2_glock *gl = gh->gh_gl;
-	struct gfs2_sbd *sdp = gl->gl_sbd;
-	const struct gfs2_glock_operations *glops = gl->gl_ops;
-
-	if (test_bit(HIF_ABORTED, &gh->gh_iflags))
-		return -EIO;
-
-	if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
-		spin_lock(&gl->gl_spin);
-		if (gl->gl_req_gh != gh &&
-		    !test_bit(HIF_HOLDER, &gh->gh_iflags) &&
-		    !list_empty(&gh->gh_list)) {
-			list_del_init(&gh->gh_list);
-			gh->gh_error = GLR_TRYFAILED;
-			run_queue(gl);
-			spin_unlock(&gl->gl_spin);
-			return gh->gh_error;
-		}
-		spin_unlock(&gl->gl_spin);
-	}
-
-	if (gh->gh_flags & LM_FLAG_PRIORITY)
-		do_cancels(gh);
-
 	wait_on_holder(gh);
-	if (gh->gh_error)
-		return gh->gh_error;
-
-	gfs2_assert_withdraw(sdp, test_bit(HIF_HOLDER, &gh->gh_iflags));
-	gfs2_assert_withdraw(sdp, relaxed_state_ok(gl->gl_state, gh->gh_state,
-						   gh->gh_flags));
-
-	if (test_bit(HIF_FIRST, &gh->gh_iflags)) {
-		gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
-
-		if (glops->go_lock) {
-			gh->gh_error = glops->go_lock(gh);
-			if (gh->gh_error) {
-				spin_lock(&gl->gl_spin);
-				list_del_init(&gh->gh_list);
-				spin_unlock(&gl->gl_spin);
-			}
-		}
-
-		spin_lock(&gl->gl_spin);
-		gl->gl_req_gh = NULL;
-		clear_bit(GLF_LOCK, &gl->gl_flags);
-		run_queue(gl);
-		spin_unlock(&gl->gl_spin);
-	}
-
 	return gh->gh_error;
 }
 
-static inline struct gfs2_holder *
-find_holder_by_owner(struct list_head *head, struct pid *pid)
-{
-	struct gfs2_holder *gh;
-
-	list_for_each_entry(gh, head, gh_list) {
-		if (gh->gh_owner_pid == pid)
-			return gh;
-	}
-
-	return NULL;
-}
-
-static void print_dbg(struct glock_iter *gi, const char *fmt, ...)
+void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...)
 {
 	va_list args;
 
 	va_start(args, fmt);
-	if (gi) {
+	if (seq) {
+		struct gfs2_glock_iter *gi = seq->private;
 		vsprintf(gi->string, fmt, args);
-		seq_printf(gi->seq, gi->string);
-	}
-	else
+		seq_printf(seq, gi->string);
+	} else {
+		printk(KERN_ERR "  ");
 		vprintk(fmt, args);
+	}
 	va_end(args);
 }
 
@@ -1104,50 +870,76 @@ static void print_dbg(struct glock_iter *gi, const char *fmt, ...)
  * add_to_queue - Add a holder to the wait queue (but look for recursion)
  * @gh: the holder structure to add
  *
+ * Eventually we should move the recursive locking trap to a
+ * debugging option or something like that. This is the fast
+ * path and needs to have the minimum number of distractions.
+ *
  */
 
-static void add_to_queue(struct gfs2_holder *gh)
+static inline void add_to_queue(struct gfs2_holder *gh)
 {
 	struct gfs2_glock *gl = gh->gh_gl;
-	struct gfs2_holder *existing;
+	struct gfs2_sbd *sdp = gl->gl_sbd;
+	struct list_head *insert_pt = NULL;
+	struct gfs2_holder *gh2;
+	int try_lock = 0;
 
 	BUG_ON(gh->gh_owner_pid == NULL);
 	if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags))
 		BUG();
 
-	if (!(gh->gh_flags & GL_FLOCK)) {
-		existing = find_holder_by_owner(&gl->gl_holders,
-						gh->gh_owner_pid);
-		if (existing) {
-			print_symbol(KERN_WARNING "original: %s\n",
-				     existing->gh_ip);
-			printk(KERN_INFO "pid : %d\n",
-			       pid_nr(existing->gh_owner_pid));
-			printk(KERN_INFO "lock type : %d lock state : %d\n",
-			       existing->gh_gl->gl_name.ln_type,
-			       existing->gh_gl->gl_state);
-			print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip);
-			printk(KERN_INFO "pid : %d\n",
-			       pid_nr(gh->gh_owner_pid));
-			printk(KERN_INFO "lock type : %d lock state : %d\n",
-			       gl->gl_name.ln_type, gl->gl_state);
-			BUG();
-		}
-
-		existing = find_holder_by_owner(&gl->gl_waiters3,
-						gh->gh_owner_pid);
-		if (existing) {
-			print_symbol(KERN_WARNING "original: %s\n",
-				     existing->gh_ip);
-			print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip);
-			BUG();
-		}
+	if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
+		if (test_bit(GLF_LOCK, &gl->gl_flags))
+			try_lock = 1;
+		if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
+			goto fail;
+	}
+
+	list_for_each_entry(gh2, &gl->gl_holders, gh_list) {
+		if (unlikely(gh2->gh_owner_pid == gh->gh_owner_pid &&
+		    (gh->gh_gl->gl_ops->go_type != LM_TYPE_FLOCK)))
+			goto trap_recursive;
+		if (try_lock &&
+		    !(gh2->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) &&
+		    !may_grant(gl, gh)) {
+fail:
+			gh->gh_error = GLR_TRYFAILED;
+			gfs2_holder_wake(gh);
+			return;
+		}
+		if (test_bit(HIF_HOLDER, &gh2->gh_iflags))
+			continue;
+		if (unlikely((gh->gh_flags & LM_FLAG_PRIORITY) && !insert_pt))
+			insert_pt = &gh2->gh_list;
+	}
+	if (likely(insert_pt == NULL)) {
+		list_add_tail(&gh->gh_list, &gl->gl_holders);
+		if (unlikely(gh->gh_flags & LM_FLAG_PRIORITY))
+			goto do_cancel;
+		return;
+	}
+	list_add_tail(&gh->gh_list, insert_pt);
+do_cancel:
+	gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
+	if (!(gh->gh_flags & LM_FLAG_PRIORITY)) {
+		spin_unlock(&gl->gl_spin);
+		if (sdp->sd_lockstruct.ls_ops->lm_cancel)
+			sdp->sd_lockstruct.ls_ops->lm_cancel(gl->gl_lock);
+		spin_lock(&gl->gl_spin);
 	}
+	return;
 
-	if (gh->gh_flags & LM_FLAG_PRIORITY)
-		list_add(&gh->gh_list, &gl->gl_waiters3);
-	else
-		list_add_tail(&gh->gh_list, &gl->gl_waiters3);
+trap_recursive:
+	print_symbol(KERN_ERR "original: %s\n", gh2->gh_ip);
+	printk(KERN_ERR "pid: %d\n", pid_nr(gh2->gh_owner_pid));
+	printk(KERN_ERR "lock type: %d req lock state : %d\n",
+	       gh2->gh_gl->gl_name.ln_type, gh2->gh_state);
+	print_symbol(KERN_ERR "new: %s\n", gh->gh_ip);
+	printk(KERN_ERR "pid: %d\n", pid_nr(gh->gh_owner_pid));
+	printk(KERN_ERR "lock type: %d req lock state : %d\n",
+	       gh->gh_gl->gl_name.ln_type, gh->gh_state);
+	__dump_glock(NULL, gl);
+	BUG();
 }
 
 /**
@@ -1165,24 +957,16 @@ int gfs2_glock_nq(struct gfs2_holder *gh)
 	struct gfs2_sbd *sdp = gl->gl_sbd;
 	int error = 0;
 
-restart:
-	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) {
-		set_bit(HIF_ABORTED, &gh->gh_iflags);
+	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
 		return -EIO;
-	}
 
 	spin_lock(&gl->gl_spin);
 	add_to_queue(gh);
-	run_queue(gl);
+	run_queue(gl, 1);
 	spin_unlock(&gl->gl_spin);
 
-	if (!(gh->gh_flags & GL_ASYNC)) {
-		error = glock_wait_internal(gh);
-		if (error == GLR_CANCELED) {
-			msleep(100);
-			goto restart;
-		}
-	}
+	if (!(gh->gh_flags & GL_ASYNC))
+		error = gfs2_glock_wait(gh);
 
 	return error;
 }
@@ -1196,48 +980,7 @@ restart:
 
 int gfs2_glock_poll(struct gfs2_holder *gh)
 {
-	struct gfs2_glock *gl = gh->gh_gl;
-	int ready = 0;
-
-	spin_lock(&gl->gl_spin);
-
-	if (test_bit(HIF_HOLDER, &gh->gh_iflags))
-		ready = 1;
-	else if (list_empty(&gh->gh_list)) {
-		if (gh->gh_error == GLR_CANCELED) {
-			spin_unlock(&gl->gl_spin);
-			msleep(100);
-			if (gfs2_glock_nq(gh))
-				return 1;
-			return 0;
-		} else
-			ready = 1;
-	}
-
-	spin_unlock(&gl->gl_spin);
-
-	return ready;
-}
-
-/**
- * gfs2_glock_wait - wait for a lock acquisition that ended in a GLR_ASYNC
- * @gh: the holder structure
- *
- * Returns: 0, GLR_TRYFAILED, or errno on failure
- */
-
-int gfs2_glock_wait(struct gfs2_holder *gh)
-{
-	int error;
-
-	error = glock_wait_internal(gh);
-	if (error == GLR_CANCELED) {
-		msleep(100);
-		gh->gh_flags &= ~GL_ASYNC;
-		error = gfs2_glock_nq(gh);
-	}
-
-	return error;
+	return test_bit(HIF_WAIT, &gh->gh_iflags) ? 0 : 1;
 }
 
 /**
@@ -1251,26 +994,30 @@ void gfs2_glock_dq(struct gfs2_holder *gh)
 	struct gfs2_glock *gl = gh->gh_gl;
 	const struct gfs2_glock_operations *glops = gl->gl_ops;
 	unsigned delay = 0;
+	int fast_path = 0;
 
+	spin_lock(&gl->gl_spin);
 	if (gh->gh_flags & GL_NOCACHE)
 		handle_callback(gl, LM_ST_UNLOCKED, 0, 0);
 
-	gfs2_glmutex_lock(gl);
-
-	spin_lock(&gl->gl_spin);
 	list_del_init(&gh->gh_list);
-
-	if (list_empty(&gl->gl_holders)) {
+	if (find_first_holder(gl) == NULL) {
 		if (glops->go_unlock) {
+			GLOCK_BUG_ON(gl, test_and_set_bit(GLF_LOCK, &gl->gl_flags));
 			spin_unlock(&gl->gl_spin);
 			glops->go_unlock(gh);
 			spin_lock(&gl->gl_spin);
+			clear_bit(GLF_LOCK, &gl->gl_flags);
 		}
 		gl->gl_stamp = jiffies;
+		if (list_empty(&gl->gl_holders) &&
+		    !test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
+		    !test_bit(GLF_DEMOTE, &gl->gl_flags))
+			fast_path = 1;
 	}
-
-	clear_bit(GLF_LOCK, &gl->gl_flags);
 	spin_unlock(&gl->gl_spin);
+	if (likely(fast_path))
+		return;
 
 	gfs2_glock_hold(gl);
 	if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
@@ -1454,6 +1201,8 @@ void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs)
 static int gfs2_lm_hold_lvb(struct gfs2_sbd *sdp, void *lock, char **lvbp)
 {
 	int error = -EIO;
+	if (!sdp->sd_lockstruct.ls_ops->lm_hold_lvb)
+		return 0;
 	if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
 		error = sdp->sd_lockstruct.ls_ops->lm_hold_lvb(lock, lvbp);
 	return error;
@@ -1469,20 +1218,14 @@ int gfs2_lvb_hold(struct gfs2_glock *gl)
 {
 	int error;
 
-	gfs2_glmutex_lock(gl);
-
 	if (!atomic_read(&gl->gl_lvb_count)) {
 		error = gfs2_lm_hold_lvb(gl->gl_sbd, gl->gl_lock, &gl->gl_lvb);
-		if (error) {
-			gfs2_glmutex_unlock(gl);
+		if (error)
 			return error;
-		}
 		gfs2_glock_hold(gl);
 	}
 	atomic_inc(&gl->gl_lvb_count);
 
-	gfs2_glmutex_unlock(gl);
-
 	return 0;
 }
 
@@ -1497,17 +1240,13 @@ void gfs2_lvb_unhold(struct gfs2_glock *gl)
 	struct gfs2_sbd *sdp = gl->gl_sbd;
 
 	gfs2_glock_hold(gl);
-	gfs2_glmutex_lock(gl);
-
 	gfs2_assert(gl->gl_sbd, atomic_read(&gl->gl_lvb_count) > 0);
 	if (atomic_dec_and_test(&gl->gl_lvb_count)) {
-		if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
+		if (sdp->sd_lockstruct.ls_ops->lm_unhold_lvb)
 			sdp->sd_lockstruct.ls_ops->lm_unhold_lvb(gl->gl_lock, gl->gl_lvb);
 		gl->gl_lvb = NULL;
 		gfs2_glock_put(gl);
 	}
-
-	gfs2_glmutex_unlock(gl);
 	gfs2_glock_put(gl);
 }
 
@@ -1527,7 +1266,9 @@ static void blocking_cb(struct gfs2_sbd *sdp, struct lm_lockname *name,
 	if (time_before(now, holdtime))
 		delay = holdtime - now;
 
+	spin_lock(&gl->gl_spin);
 	handle_callback(gl, state, 1, delay);
+	spin_unlock(&gl->gl_spin);
 	if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
 		gfs2_glock_put(gl);
 }
@@ -1568,7 +1309,8 @@ void gfs2_glock_cb(void *cb_data, unsigned int type, void *data)
 		gl = gfs2_glock_find(sdp, &async->lc_name);
 		if (gfs2_assert_warn(sdp, gl))
 			return;
-		xmote_bh(gl, async->lc_ret);
+		gl->gl_reply = async->lc_ret;
+		set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
 		if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
 			gfs2_glock_put(gl);
 		up_read(&gfs2_umount_flush_sem);
@@ -1581,11 +1323,6 @@ void gfs2_glock_cb(void *cb_data, unsigned int type, void *data)
 		wake_up_process(sdp->sd_recoverd_process);
 		return;
 
-	case LM_CB_DROPLOCKS:
-		gfs2_gl_hash_clear(sdp, NO_WAIT);
-		gfs2_quota_scan(sdp);
-		return;
-
 	default:
 		gfs2_assert_warn(sdp, 0);
 		return;
@@ -1646,6 +1383,7 @@ void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
 void gfs2_reclaim_glock(struct gfs2_sbd *sdp)
 {
 	struct gfs2_glock *gl;
+	int done_callback = 0;
 
 	spin_lock(&sdp->sd_reclaim_lock);
 	if (list_empty(&sdp->sd_reclaim_list)) {
@@ -1660,14 +1398,16 @@ void gfs2_reclaim_glock(struct gfs2_sbd *sdp)
 	atomic_dec(&sdp->sd_reclaim_count);
 	atomic_inc(&sdp->sd_reclaimed);
 
-	if (gfs2_glmutex_trylock(gl)) {
-		if (list_empty(&gl->gl_holders) &&
-		    gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl))
-			handle_callback(gl, LM_ST_UNLOCKED, 0, 0);
-		gfs2_glmutex_unlock(gl);
+	spin_lock(&gl->gl_spin);
+	if (find_first_holder(gl) == NULL &&
+	    gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl)) {
+		handle_callback(gl, LM_ST_UNLOCKED, 0, 0);
+		done_callback = 1;
 	}
-
-	gfs2_glock_put(gl);
+	spin_unlock(&gl->gl_spin);
+	if (!done_callback ||
+	    queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
+		gfs2_glock_put(gl);
 }
 
 /**
@@ -1724,18 +1464,14 @@ static void scan_glock(struct gfs2_glock *gl)
 {
 	if (gl->gl_ops == &gfs2_inode_glops && gl->gl_object)
 		return;
+	if (test_bit(GLF_LOCK, &gl->gl_flags))
+		return;
 
-	if (gfs2_glmutex_trylock(gl)) {
-		if (list_empty(&gl->gl_holders) &&
-		    gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl))
-			goto out_schedule;
-		gfs2_glmutex_unlock(gl);
-	}
-	return;
-
-out_schedule:
-	gfs2_glmutex_unlock(gl);
-	gfs2_glock_schedule_for_reclaim(gl);
+	spin_lock(&gl->gl_spin);
+	if (find_first_holder(gl) == NULL &&
+	    gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl))
+		gfs2_glock_schedule_for_reclaim(gl);
+	spin_unlock(&gl->gl_spin);
 }
 
 /**
@@ -1760,12 +1496,13 @@ static void clear_glock(struct gfs2_glock *gl)
 		spin_unlock(&sdp->sd_reclaim_lock);
 	}
 
-	if (gfs2_glmutex_trylock(gl)) {
-		if (list_empty(&gl->gl_holders) &&
-		    gl->gl_state != LM_ST_UNLOCKED)
-			handle_callback(gl, LM_ST_UNLOCKED, 0, 0);
-		gfs2_glmutex_unlock(gl);
-	}
+	spin_lock(&gl->gl_spin);
+	if (find_first_holder(gl) == NULL && gl->gl_state != LM_ST_UNLOCKED)
+		handle_callback(gl, LM_ST_UNLOCKED, 0, 0);
+	spin_unlock(&gl->gl_spin);
+	gfs2_glock_hold(gl);
+	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
+		gfs2_glock_put(gl);
 }
 
 /**
@@ -1773,11 +1510,10 @@ static void clear_glock(struct gfs2_glock *gl)
  * @sdp: the filesystem
  * @wait: wait until it's all gone
  *
- * Called when unmounting the filesystem, or when inter-node lock manager
- * requests DROPLOCKS because it is running out of capacity.
+ * Called when unmounting the filesystem.
  */
 
-void gfs2_gl_hash_clear(struct gfs2_sbd *sdp, int wait)
+void gfs2_gl_hash_clear(struct gfs2_sbd *sdp)
 {
 	unsigned long t;
 	unsigned int x;
@@ -1792,7 +1528,7 @@ void gfs2_gl_hash_clear(struct gfs2_sbd *sdp, int wait)
 				cont = 1;
 		}
 
-		if (!wait || !cont)
+		if (!cont)
 			break;
 
 		if (time_after_eq(jiffies,
@@ -1810,180 +1546,164 @@ void gfs2_gl_hash_clear(struct gfs2_sbd *sdp, int wait)
 	}
 }
 
-/*
- * Diagnostic routines to help debug distributed deadlock
- */
-
-static void gfs2_print_symbol(struct glock_iter *gi, const char *fmt,
-			      unsigned long address)
+static const char *state2str(unsigned state)
 {
-	char buffer[KSYM_SYMBOL_LEN];
-
-	sprint_symbol(buffer, address);
-	print_dbg(gi, fmt, buffer);
+	switch(state) {
+	case LM_ST_UNLOCKED:
+		return "UN";
+	case LM_ST_SHARED:
+		return "SH";
+	case LM_ST_DEFERRED:
+		return "DF";
+	case LM_ST_EXCLUSIVE:
+		return "EX";
+	}
+	return "??";
+}
+
+static const char *hflags2str(char *buf, unsigned flags, unsigned long iflags)
+{
+	char *p = buf;
+	if (flags & LM_FLAG_TRY)
+		*p++ = 't';
+	if (flags & LM_FLAG_TRY_1CB)
+		*p++ = 'T';
+	if (flags & LM_FLAG_NOEXP)
+		*p++ = 'e';
+	if (flags & LM_FLAG_ANY)
+		*p++ = 'a';
+	if (flags & LM_FLAG_PRIORITY)
+		*p++ = 'p';
+	if (flags & GL_ASYNC)
+		*p++ = 'a';
+	if (flags & GL_EXACT)
+		*p++ = 'E';
+	if (flags & GL_ATIME)
+		*p++ = 'a';
+	if (flags & GL_NOCACHE)
+		*p++ = 'c';
+	if (test_bit(HIF_HOLDER, &iflags))
+		*p++ = 'H';
+	if (test_bit(HIF_WAIT, &iflags))
+		*p++ = 'W';
+	if (test_bit(HIF_FIRST, &iflags))
+		*p++ = 'F';
+	*p = 0;
+	return buf;
 }
 
 /**
  * dump_holder - print information about a glock holder
- * @str: a string naming the type of holder
+ * @seq: the seq_file struct
  * @gh: the glock holder
  *
  * Returns: 0 on success, -ENOBUFS when we run out of space
  */
 
-static int dump_holder(struct glock_iter *gi, char *str,
-		       struct gfs2_holder *gh)
+static int dump_holder(struct seq_file *seq, const struct gfs2_holder *gh)
 {
-	unsigned int x;
-	struct task_struct *gh_owner;
+	struct task_struct *gh_owner = NULL;
+	char buffer[KSYM_SYMBOL_LEN];
+	char flags_buf[32];
 
-	print_dbg(gi, "  %s\n", str);
-	if (gh->gh_owner_pid) {
-		print_dbg(gi, "    owner = %ld ",
-			  (long)pid_nr(gh->gh_owner_pid));
+	sprint_symbol(buffer, gh->gh_ip);
+	if (gh->gh_owner_pid)
 		gh_owner = pid_task(gh->gh_owner_pid, PIDTYPE_PID);
-		if (gh_owner)
-			print_dbg(gi, "(%s)\n", gh_owner->comm);
-		else
-			print_dbg(gi, "(ended)\n");
-	} else
-		print_dbg(gi, "    owner = -1\n");
-	print_dbg(gi, "    gh_state = %u\n", gh->gh_state);
-	print_dbg(gi, "    gh_flags =");
-	for (x = 0; x < 32; x++)
-		if (gh->gh_flags & (1 << x))
-			print_dbg(gi, " %u", x);
-	print_dbg(gi, " \n");
-	print_dbg(gi, "    error = %d\n", gh->gh_error);
-	print_dbg(gi, "    gh_iflags =");
-	for (x = 0; x < 32; x++)
-		if (test_bit(x, &gh->gh_iflags))
-			print_dbg(gi, " %u", x);
-	print_dbg(gi, " \n");
-	gfs2_print_symbol(gi, "    initialized at: %s\n", gh->gh_ip);
-
+	gfs2_print_dbg(seq, " H: s:%s f:%s e:%d p:%ld [%s] %s\n",
+		       state2str(gh->gh_state),
+		       hflags2str(flags_buf, gh->gh_flags, gh->gh_iflags),
+		       gh->gh_error,
+		       gh->gh_owner_pid ? (long)pid_nr(gh->gh_owner_pid) : -1,
+		       gh_owner ? gh_owner->comm : "(ended)", buffer);
 	return 0;
 }
 
-/**
- * dump_inode - print information about an inode
- * @ip: the inode
- *
- * Returns: 0 on success, -ENOBUFS when we run out of space
- */
-
-static int dump_inode(struct glock_iter *gi, struct gfs2_inode *ip)
-{
-	unsigned int x;
-
-	print_dbg(gi, "  Inode:\n");
-	print_dbg(gi, "    num = %llu/%llu\n",
-		  (unsigned long long)ip->i_no_formal_ino,
-		  (unsigned long long)ip->i_no_addr);
-	print_dbg(gi, "    type = %u\n", IF2DT(ip->i_inode.i_mode));
+static const char *gflags2str(char *buf, const unsigned long *gflags)
+{
+	char *p = buf;
+	if (test_bit(GLF_LOCK, gflags))
+		*p++ = 'l';
+	if (test_bit(GLF_STICKY, gflags))
+		*p++ = 's';
+	if (test_bit(GLF_DEMOTE, gflags))
+		*p++ = 'D';
+	if (test_bit(GLF_PENDING_DEMOTE, gflags))
+		*p++ = 'd';
+	if (test_bit(GLF_DEMOTE_IN_PROGRESS, gflags))
+		*p++ = 'p';
+	if (test_bit(GLF_DIRTY, gflags))
+		*p++ = 'y';
+	if (test_bit(GLF_LFLUSH, gflags))
1884 print_dbg(gi, " i_flags ="); 1637 *p++ = 'f';
1885 for (x = 0; x < 32; x++) 1638 if (test_bit(GLF_INVALIDATE_IN_PROGRESS, gflags))
1886 if (test_bit(x, &ip->i_flags)) 1639 *p++ = 'i';
1887 print_dbg(gi, " %u", x); 1640 if (test_bit(GLF_REPLY_PENDING, gflags))
1888 print_dbg(gi, " \n"); 1641 *p++ = 'r';
1889 return 0; 1642 *p = 0;
1643 return buf;
1890} 1644}
1891 1645
1892/** 1646/**
1893 * dump_glock - print information about a glock 1647 * __dump_glock - print information about a glock
1648 * @seq: The seq_file struct
1894 * @gl: the glock 1649 * @gl: the glock
1895 * @count: where we are in the buffer 1650 *
1651 * The file format is as follows:
1652 * One line per object, capital letters are used to indicate objects
1653 * G = glock, I = Inode, R = rgrp, H = holder. Glocks are not indented,
1654 * other objects are indented by a single space and follow the glock to
1655 * which they are related. Fields are indicated by lower case letters
1656 * followed by a colon and the field value, except for strings which are in
 1657 * [] so that it's possible to see if they are composed of spaces for
 1658 * example. The fields are n = number (id of the object), f = flags,
1659 * t = type, s = state, r = refcount, e = error, p = pid.
1896 * 1660 *
1897 * Returns: 0 on success, -ENOBUFS when we run out of space 1661 * Returns: 0 on success, -ENOBUFS when we run out of space
1898 */ 1662 */
1899 1663
1900static int dump_glock(struct glock_iter *gi, struct gfs2_glock *gl) 1664static int __dump_glock(struct seq_file *seq, const struct gfs2_glock *gl)
1901{ 1665{
1902 struct gfs2_holder *gh; 1666 const struct gfs2_glock_operations *glops = gl->gl_ops;
1903 unsigned int x; 1667 unsigned long long dtime;
1904 int error = -ENOBUFS; 1668 const struct gfs2_holder *gh;
1905 struct task_struct *gl_owner; 1669 char gflags_buf[32];
1670 int error = 0;
1906 1671
1907 spin_lock(&gl->gl_spin); 1672 dtime = jiffies - gl->gl_demote_time;
1673 dtime *= 1000000/HZ; /* demote time in uSec */
1674 if (!test_bit(GLF_DEMOTE, &gl->gl_flags))
1675 dtime = 0;
1676 gfs2_print_dbg(seq, "G: s:%s n:%u/%llu f:%s t:%s d:%s/%llu l:%d a:%d r:%d\n",
1677 state2str(gl->gl_state),
1678 gl->gl_name.ln_type,
1679 (unsigned long long)gl->gl_name.ln_number,
1680 gflags2str(gflags_buf, &gl->gl_flags),
1681 state2str(gl->gl_target),
1682 state2str(gl->gl_demote_state), dtime,
1683 atomic_read(&gl->gl_lvb_count),
1684 atomic_read(&gl->gl_ail_count),
1685 atomic_read(&gl->gl_ref));
1908 1686
1909 print_dbg(gi, "Glock 0x%p (%u, 0x%llx)\n", gl, gl->gl_name.ln_type,
1910 (unsigned long long)gl->gl_name.ln_number);
1911 print_dbg(gi, " gl_flags =");
1912 for (x = 0; x < 32; x++) {
1913 if (test_bit(x, &gl->gl_flags))
1914 print_dbg(gi, " %u", x);
1915 }
1916 if (!test_bit(GLF_LOCK, &gl->gl_flags))
1917 print_dbg(gi, " (unlocked)");
1918 print_dbg(gi, " \n");
1919 print_dbg(gi, " gl_ref = %d\n", atomic_read(&gl->gl_ref));
1920 print_dbg(gi, " gl_state = %u\n", gl->gl_state);
1921 if (gl->gl_owner_pid) {
1922 gl_owner = pid_task(gl->gl_owner_pid, PIDTYPE_PID);
1923 if (gl_owner)
1924 print_dbg(gi, " gl_owner = pid %d (%s)\n",
1925 pid_nr(gl->gl_owner_pid), gl_owner->comm);
1926 else
1927 print_dbg(gi, " gl_owner = %d (ended)\n",
1928 pid_nr(gl->gl_owner_pid));
1929 } else
1930 print_dbg(gi, " gl_owner = -1\n");
1931 print_dbg(gi, " gl_ip = %lu\n", gl->gl_ip);
1932 print_dbg(gi, " req_gh = %s\n", (gl->gl_req_gh) ? "yes" : "no");
1933 print_dbg(gi, " lvb_count = %d\n", atomic_read(&gl->gl_lvb_count));
1934 print_dbg(gi, " object = %s\n", (gl->gl_object) ? "yes" : "no");
1935 print_dbg(gi, " reclaim = %s\n",
1936 (list_empty(&gl->gl_reclaim)) ? "no" : "yes");
1937 if (gl->gl_aspace)
1938 print_dbg(gi, " aspace = 0x%p nrpages = %lu\n", gl->gl_aspace,
1939 gl->gl_aspace->i_mapping->nrpages);
1940 else
1941 print_dbg(gi, " aspace = no\n");
1942 print_dbg(gi, " ail = %d\n", atomic_read(&gl->gl_ail_count));
1943 if (gl->gl_req_gh) {
1944 error = dump_holder(gi, "Request", gl->gl_req_gh);
1945 if (error)
1946 goto out;
1947 }
1948 list_for_each_entry(gh, &gl->gl_holders, gh_list) { 1687 list_for_each_entry(gh, &gl->gl_holders, gh_list) {
1949 error = dump_holder(gi, "Holder", gh); 1688 error = dump_holder(seq, gh);
1950 if (error) 1689 if (error)
1951 goto out; 1690 goto out;
1952 } 1691 }
1953 list_for_each_entry(gh, &gl->gl_waiters1, gh_list) { 1692 if (gl->gl_state != LM_ST_UNLOCKED && glops->go_dump)
1954 error = dump_holder(gi, "Waiter1", gh); 1693 error = glops->go_dump(seq, gl);
1955 if (error)
1956 goto out;
1957 }
1958 list_for_each_entry(gh, &gl->gl_waiters3, gh_list) {
1959 error = dump_holder(gi, "Waiter3", gh);
1960 if (error)
1961 goto out;
1962 }
1963 if (test_bit(GLF_DEMOTE, &gl->gl_flags)) {
1964 print_dbg(gi, " Demotion req to state %u (%llu uS ago)\n",
1965 gl->gl_demote_state, (unsigned long long)
1966 (jiffies - gl->gl_demote_time)*(1000000/HZ));
1967 }
1968 if (gl->gl_ops == &gfs2_inode_glops && gl->gl_object) {
1969 if (!test_bit(GLF_LOCK, &gl->gl_flags) &&
1970 list_empty(&gl->gl_holders)) {
1971 error = dump_inode(gi, gl->gl_object);
1972 if (error)
1973 goto out;
1974 } else {
1975 error = -ENOBUFS;
1976 print_dbg(gi, " Inode: busy\n");
1977 }
1978 }
1979
1980 error = 0;
1981
1982out: 1694out:
1983 spin_unlock(&gl->gl_spin);
1984 return error; 1695 return error;
1985} 1696}
1986 1697
1698static int dump_glock(struct seq_file *seq, struct gfs2_glock *gl)
1699{
1700 int ret;
1701 spin_lock(&gl->gl_spin);
1702 ret = __dump_glock(seq, gl);
1703 spin_unlock(&gl->gl_spin);
1704 return ret;
1705}
1706
1987/** 1707/**
1988 * gfs2_dump_lockstate - print out the current lockstate 1708 * gfs2_dump_lockstate - print out the current lockstate
1989 * @sdp: the filesystem 1709 * @sdp: the filesystem
@@ -2086,7 +1806,7 @@ void gfs2_glock_exit(void)
2086module_param(scand_secs, uint, S_IRUGO|S_IWUSR); 1806module_param(scand_secs, uint, S_IRUGO|S_IWUSR);
2087MODULE_PARM_DESC(scand_secs, "The number of seconds between scand runs"); 1807MODULE_PARM_DESC(scand_secs, "The number of seconds between scand runs");
2088 1808
2089static int gfs2_glock_iter_next(struct glock_iter *gi) 1809static int gfs2_glock_iter_next(struct gfs2_glock_iter *gi)
2090{ 1810{
2091 struct gfs2_glock *gl; 1811 struct gfs2_glock *gl;
2092 1812
@@ -2104,7 +1824,7 @@ restart:
2104 gfs2_glock_put(gl); 1824 gfs2_glock_put(gl);
2105 if (gl && gi->gl == NULL) 1825 if (gl && gi->gl == NULL)
2106 gi->hash++; 1826 gi->hash++;
2107 while(gi->gl == NULL) { 1827 while (gi->gl == NULL) {
2108 if (gi->hash >= GFS2_GL_HASH_SIZE) 1828 if (gi->hash >= GFS2_GL_HASH_SIZE)
2109 return 1; 1829 return 1;
2110 read_lock(gl_lock_addr(gi->hash)); 1830 read_lock(gl_lock_addr(gi->hash));
@@ -2122,58 +1842,34 @@ restart:
2122 return 0; 1842 return 0;
2123} 1843}
2124 1844
2125static void gfs2_glock_iter_free(struct glock_iter *gi) 1845static void gfs2_glock_iter_free(struct gfs2_glock_iter *gi)
2126{ 1846{
2127 if (gi->gl) 1847 if (gi->gl)
2128 gfs2_glock_put(gi->gl); 1848 gfs2_glock_put(gi->gl);
2129 kfree(gi);
2130}
2131
2132static struct glock_iter *gfs2_glock_iter_init(struct gfs2_sbd *sdp)
2133{
2134 struct glock_iter *gi;
2135
2136 gi = kmalloc(sizeof (*gi), GFP_KERNEL);
2137 if (!gi)
2138 return NULL;
2139
2140 gi->sdp = sdp;
2141 gi->hash = 0;
2142 gi->seq = NULL;
2143 gi->gl = NULL; 1849 gi->gl = NULL;
2144 memset(gi->string, 0, sizeof(gi->string));
2145
2146 if (gfs2_glock_iter_next(gi)) {
2147 gfs2_glock_iter_free(gi);
2148 return NULL;
2149 }
2150
2151 return gi;
2152} 1850}
2153 1851
2154static void *gfs2_glock_seq_start(struct seq_file *file, loff_t *pos) 1852static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos)
2155{ 1853{
2156 struct glock_iter *gi; 1854 struct gfs2_glock_iter *gi = seq->private;
2157 loff_t n = *pos; 1855 loff_t n = *pos;
2158 1856
2159 gi = gfs2_glock_iter_init(file->private); 1857 gi->hash = 0;
2160 if (!gi)
2161 return NULL;
2162 1858
2163 while(n--) { 1859 do {
2164 if (gfs2_glock_iter_next(gi)) { 1860 if (gfs2_glock_iter_next(gi)) {
2165 gfs2_glock_iter_free(gi); 1861 gfs2_glock_iter_free(gi);
2166 return NULL; 1862 return NULL;
2167 } 1863 }
2168 } 1864 } while (n--);
2169 1865
2170 return gi; 1866 return gi->gl;
2171} 1867}
2172 1868
2173static void *gfs2_glock_seq_next(struct seq_file *file, void *iter_ptr, 1869static void *gfs2_glock_seq_next(struct seq_file *seq, void *iter_ptr,
2174 loff_t *pos) 1870 loff_t *pos)
2175{ 1871{
2176 struct glock_iter *gi = iter_ptr; 1872 struct gfs2_glock_iter *gi = seq->private;
2177 1873
2178 (*pos)++; 1874 (*pos)++;
2179 1875
@@ -2182,24 +1878,18 @@ static void *gfs2_glock_seq_next(struct seq_file *file, void *iter_ptr,
2182 return NULL; 1878 return NULL;
2183 } 1879 }
2184 1880
2185 return gi; 1881 return gi->gl;
2186} 1882}
2187 1883
2188static void gfs2_glock_seq_stop(struct seq_file *file, void *iter_ptr) 1884static void gfs2_glock_seq_stop(struct seq_file *seq, void *iter_ptr)
2189{ 1885{
2190 struct glock_iter *gi = iter_ptr; 1886 struct gfs2_glock_iter *gi = seq->private;
2191 if (gi) 1887 gfs2_glock_iter_free(gi);
2192 gfs2_glock_iter_free(gi);
2193} 1888}
2194 1889
2195static int gfs2_glock_seq_show(struct seq_file *file, void *iter_ptr) 1890static int gfs2_glock_seq_show(struct seq_file *seq, void *iter_ptr)
2196{ 1891{
2197 struct glock_iter *gi = iter_ptr; 1892 return dump_glock(seq, iter_ptr);
2198
2199 gi->seq = file;
2200 dump_glock(gi, gi->gl);
2201
2202 return 0;
2203} 1893}
2204 1894
2205static const struct seq_operations gfs2_glock_seq_ops = { 1895static const struct seq_operations gfs2_glock_seq_ops = {
@@ -2211,17 +1901,14 @@ static const struct seq_operations gfs2_glock_seq_ops = {
2211 1901
2212static int gfs2_debugfs_open(struct inode *inode, struct file *file) 1902static int gfs2_debugfs_open(struct inode *inode, struct file *file)
2213{ 1903{
2214 struct seq_file *seq; 1904 int ret = seq_open_private(file, &gfs2_glock_seq_ops,
2215 int ret; 1905 sizeof(struct gfs2_glock_iter));
2216 1906 if (ret == 0) {
2217 ret = seq_open(file, &gfs2_glock_seq_ops); 1907 struct seq_file *seq = file->private_data;
2218 if (ret) 1908 struct gfs2_glock_iter *gi = seq->private;
2219 return ret; 1909 gi->sdp = inode->i_private;
2220 1910 }
2221 seq = file->private_data; 1911 return ret;
2222 seq->private = inode->i_private;
2223
2224 return 0;
2225} 1912}
2226 1913
2227static const struct file_operations gfs2_debug_fops = { 1914static const struct file_operations gfs2_debug_fops = {
@@ -2229,7 +1916,7 @@ static const struct file_operations gfs2_debug_fops = {
2229 .open = gfs2_debugfs_open, 1916 .open = gfs2_debugfs_open,
2230 .read = seq_read, 1917 .read = seq_read,
2231 .llseek = seq_lseek, 1918 .llseek = seq_lseek,
2232 .release = seq_release 1919 .release = seq_release_private,
2233}; 1920};
2234 1921
2235int gfs2_create_debugfs_file(struct gfs2_sbd *sdp) 1922int gfs2_create_debugfs_file(struct gfs2_sbd *sdp)
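As a worked illustration of the new dump format documented above __dump_glock()
(all values below are hypothetical), a glock with a single holder would render
roughly as:

	G: s:SH n:2/5923 f:ly t:SH d:UN/0 l:0 a:0 r:4
	 H: s:SH f:H e:0 p:2314 [bash] gfs2_glock_nq+0x3a/0x1c0

The capital letter names the object (G = glock, H = holder), holders are
indented one space under their glock, and each lower-case field letter matches
the key given in the format comment.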
diff --git a/fs/gfs2/glock.h b/fs/gfs2/glock.h
index cdad3e6f8150..971d92af70fc 100644
--- a/fs/gfs2/glock.h
+++ b/fs/gfs2/glock.h
@@ -26,11 +26,8 @@
26#define GL_SKIP 0x00000100 26#define GL_SKIP 0x00000100
27#define GL_ATIME 0x00000200 27#define GL_ATIME 0x00000200
28#define GL_NOCACHE 0x00000400 28#define GL_NOCACHE 0x00000400
29#define GL_FLOCK 0x00000800
30#define GL_NOCANCEL 0x00001000
31 29
32#define GLR_TRYFAILED 13 30#define GLR_TRYFAILED 13
33#define GLR_CANCELED 14
34 31
35static inline struct gfs2_holder *gfs2_glock_is_locked_by_me(struct gfs2_glock *gl) 32static inline struct gfs2_holder *gfs2_glock_is_locked_by_me(struct gfs2_glock *gl)
36{ 33{
@@ -41,6 +38,8 @@ static inline struct gfs2_holder *gfs2_glock_is_locked_by_me(struct gfs2_glock *
41 spin_lock(&gl->gl_spin); 38 spin_lock(&gl->gl_spin);
42 pid = task_pid(current); 39 pid = task_pid(current);
43 list_for_each_entry(gh, &gl->gl_holders, gh_list) { 40 list_for_each_entry(gh, &gl->gl_holders, gh_list) {
41 if (!test_bit(HIF_HOLDER, &gh->gh_iflags))
42 break;
44 if (gh->gh_owner_pid == pid) 43 if (gh->gh_owner_pid == pid)
45 goto out; 44 goto out;
46 } 45 }
@@ -70,7 +69,7 @@ static inline int gfs2_glock_is_blocking(struct gfs2_glock *gl)
70{ 69{
71 int ret; 70 int ret;
72 spin_lock(&gl->gl_spin); 71 spin_lock(&gl->gl_spin);
73 ret = test_bit(GLF_DEMOTE, &gl->gl_flags) || !list_empty(&gl->gl_waiters3); 72 ret = test_bit(GLF_DEMOTE, &gl->gl_flags);
74 spin_unlock(&gl->gl_spin); 73 spin_unlock(&gl->gl_spin);
75 return ret; 74 return ret;
76} 75}
@@ -98,6 +97,7 @@ int gfs2_glock_nq_num(struct gfs2_sbd *sdp,
98int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs); 97int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs);
99void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs); 98void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs);
100void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs); 99void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs);
100void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...);
101 101
102/** 102/**
103 * gfs2_glock_nq_init - initialize a holder and enqueue it on a glock 103
@@ -130,10 +130,9 @@ int gfs2_lvb_hold(struct gfs2_glock *gl);
130void gfs2_lvb_unhold(struct gfs2_glock *gl); 130void gfs2_lvb_unhold(struct gfs2_glock *gl);
131 131
132void gfs2_glock_cb(void *cb_data, unsigned int type, void *data); 132void gfs2_glock_cb(void *cb_data, unsigned int type, void *data);
133
134void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl); 133void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl);
135void gfs2_reclaim_glock(struct gfs2_sbd *sdp); 134void gfs2_reclaim_glock(struct gfs2_sbd *sdp);
136void gfs2_gl_hash_clear(struct gfs2_sbd *sdp, int wait); 135void gfs2_gl_hash_clear(struct gfs2_sbd *sdp);
137 136
138int __init gfs2_glock_init(void); 137int __init gfs2_glock_init(void);
139void gfs2_glock_exit(void); 138void gfs2_glock_exit(void);
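A hedged usage sketch of the holder interface declared in this header: the
helper below is invented for illustration and error handling is simplified,
while gfs2_glock_nq_init() and gfs2_glock_dq_uninit() are the existing GFS2
entry points.

	static int example_read_locked(struct gfs2_inode *ip)
	{
		struct gfs2_holder gh;		/* stack-allocated holder */
		int error;

		/* initialise the holder and enqueue it for a shared glock */
		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
		if (error)
			return error;
		/* ... inspect metadata while the glock is held ... */
		gfs2_glock_dq_uninit(&gh);	/* dequeue and release */
		return 0;
	}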
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index 07d84d16cda4..c6c318c2a0f6 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -13,6 +13,7 @@
13#include <linux/buffer_head.h> 13#include <linux/buffer_head.h>
14#include <linux/gfs2_ondisk.h> 14#include <linux/gfs2_ondisk.h>
15#include <linux/lm_interface.h> 15#include <linux/lm_interface.h>
16#include <linux/bio.h>
16 17
17#include "gfs2.h" 18#include "gfs2.h"
18#include "incore.h" 19#include "incore.h"
@@ -172,26 +173,6 @@ static void inode_go_sync(struct gfs2_glock *gl)
172} 173}
173 174
174/** 175/**
175 * inode_go_xmote_bh - After promoting/demoting a glock
176 * @gl: the glock
177 *
178 */
179
180static void inode_go_xmote_bh(struct gfs2_glock *gl)
181{
182 struct gfs2_holder *gh = gl->gl_req_gh;
183 struct buffer_head *bh;
184 int error;
185
186 if (gl->gl_state != LM_ST_UNLOCKED &&
187 (!gh || !(gh->gh_flags & GL_SKIP))) {
188 error = gfs2_meta_read(gl, gl->gl_name.ln_number, 0, &bh);
189 if (!error)
190 brelse(bh);
191 }
192}
193
194/**
195 * inode_go_inval - prepare a inode glock to be released 176 * inode_go_inval - prepare a inode glock to be released
196 * @gl: the glock 177 * @gl: the glock
197 * @flags: 178 * @flags:
@@ -267,6 +248,26 @@ static int inode_go_lock(struct gfs2_holder *gh)
267} 248}
268 249
269/** 250/**
251 * inode_go_dump - print information about an inode
252 * @seq: The iterator
253 * @ip: the inode
254 *
255 * Returns: 0 on success, -ENOBUFS when we run out of space
256 */
257
258static int inode_go_dump(struct seq_file *seq, const struct gfs2_glock *gl)
259{
260 const struct gfs2_inode *ip = gl->gl_object;
261 if (ip == NULL)
262 return 0;
263 gfs2_print_dbg(seq, " I: n:%llu/%llu t:%u f:0x%08lx\n",
264 (unsigned long long)ip->i_no_formal_ino,
265 (unsigned long long)ip->i_no_addr,
266 IF2DT(ip->i_inode.i_mode), ip->i_flags);
267 return 0;
268}
269
270/**
270 * rgrp_go_demote_ok - Check to see if it's ok to unlock a RG's glock 271 * rgrp_go_demote_ok - Check to see if it's ok to unlock a RG's glock
271 * @gl: the glock 272 * @gl: the glock
272 * 273 *
@@ -306,6 +307,22 @@ static void rgrp_go_unlock(struct gfs2_holder *gh)
306} 307}
307 308
308/** 309/**
310 * rgrp_go_dump - print out an rgrp
311 * @seq: The iterator
312 * @gl: The glock in question
313 *
314 */
315
316static int rgrp_go_dump(struct seq_file *seq, const struct gfs2_glock *gl)
317{
318 const struct gfs2_rgrpd *rgd = gl->gl_object;
319 if (rgd == NULL)
320 return 0;
321 gfs2_print_dbg(seq, " R: n:%llu\n", (unsigned long long)rgd->rd_addr);
322 return 0;
323}
324
325/**
309 * trans_go_sync - promote/demote the transaction glock 326 * trans_go_sync - promote/demote the transaction glock
310 * @gl: the glock 327 * @gl: the glock
311 * @state: the requested state 328 * @state: the requested state
@@ -330,7 +347,7 @@ static void trans_go_sync(struct gfs2_glock *gl)
330 * 347 *
331 */ 348 */
332 349
333static void trans_go_xmote_bh(struct gfs2_glock *gl) 350static int trans_go_xmote_bh(struct gfs2_glock *gl, struct gfs2_holder *gh)
334{ 351{
335 struct gfs2_sbd *sdp = gl->gl_sbd; 352 struct gfs2_sbd *sdp = gl->gl_sbd;
336 struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode); 353 struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
@@ -338,8 +355,7 @@ static void trans_go_xmote_bh(struct gfs2_glock *gl)
338 struct gfs2_log_header_host head; 355 struct gfs2_log_header_host head;
339 int error; 356 int error;
340 357
341 if (gl->gl_state != LM_ST_UNLOCKED && 358 if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
342 test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
343 j_gl->gl_ops->go_inval(j_gl, DIO_METADATA); 359 j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);
344 360
345 error = gfs2_find_jhead(sdp->sd_jdesc, &head); 361 error = gfs2_find_jhead(sdp->sd_jdesc, &head);
@@ -354,6 +370,7 @@ static void trans_go_xmote_bh(struct gfs2_glock *gl)
354 gfs2_log_pointers_init(sdp, head.lh_blkno); 370 gfs2_log_pointers_init(sdp, head.lh_blkno);
355 } 371 }
356 } 372 }
373 return 0;
357} 374}
358 375
359/** 376/**
@@ -375,12 +392,12 @@ const struct gfs2_glock_operations gfs2_meta_glops = {
375 392
376const struct gfs2_glock_operations gfs2_inode_glops = { 393const struct gfs2_glock_operations gfs2_inode_glops = {
377 .go_xmote_th = inode_go_sync, 394 .go_xmote_th = inode_go_sync,
378 .go_xmote_bh = inode_go_xmote_bh,
379 .go_inval = inode_go_inval, 395 .go_inval = inode_go_inval,
380 .go_demote_ok = inode_go_demote_ok, 396 .go_demote_ok = inode_go_demote_ok,
381 .go_lock = inode_go_lock, 397 .go_lock = inode_go_lock,
398 .go_dump = inode_go_dump,
382 .go_type = LM_TYPE_INODE, 399 .go_type = LM_TYPE_INODE,
383 .go_min_hold_time = HZ / 10, 400 .go_min_hold_time = HZ / 5,
384}; 401};
385 402
386const struct gfs2_glock_operations gfs2_rgrp_glops = { 403const struct gfs2_glock_operations gfs2_rgrp_glops = {
@@ -389,8 +406,9 @@ const struct gfs2_glock_operations gfs2_rgrp_glops = {
389 .go_demote_ok = rgrp_go_demote_ok, 406 .go_demote_ok = rgrp_go_demote_ok,
390 .go_lock = rgrp_go_lock, 407 .go_lock = rgrp_go_lock,
391 .go_unlock = rgrp_go_unlock, 408 .go_unlock = rgrp_go_unlock,
409 .go_dump = rgrp_go_dump,
392 .go_type = LM_TYPE_RGRP, 410 .go_type = LM_TYPE_RGRP,
393 .go_min_hold_time = HZ / 10, 411 .go_min_hold_time = HZ / 5,
394}; 412};
395 413
396const struct gfs2_glock_operations gfs2_trans_glops = { 414const struct gfs2_glock_operations gfs2_trans_glops = {
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index eabe5eac41da..448697a5c462 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -77,7 +77,6 @@ struct gfs2_rgrp_host {
77struct gfs2_rgrpd { 77struct gfs2_rgrpd {
78 struct list_head rd_list; /* Link with superblock */ 78 struct list_head rd_list; /* Link with superblock */
79 struct list_head rd_list_mru; 79 struct list_head rd_list_mru;
80 struct list_head rd_recent; /* Recently used rgrps */
81 struct gfs2_glock *rd_gl; /* Glock for this rgrp */ 80 struct gfs2_glock *rd_gl; /* Glock for this rgrp */
82 u64 rd_addr; /* grp block disk address */ 81 u64 rd_addr; /* grp block disk address */
83 u64 rd_data0; /* first data location */ 82 u64 rd_data0; /* first data location */
@@ -128,20 +127,20 @@ struct gfs2_bufdata {
128 127
129struct gfs2_glock_operations { 128struct gfs2_glock_operations {
130 void (*go_xmote_th) (struct gfs2_glock *gl); 129 void (*go_xmote_th) (struct gfs2_glock *gl);
131 void (*go_xmote_bh) (struct gfs2_glock *gl); 130 int (*go_xmote_bh) (struct gfs2_glock *gl, struct gfs2_holder *gh);
132 void (*go_inval) (struct gfs2_glock *gl, int flags); 131 void (*go_inval) (struct gfs2_glock *gl, int flags);
133 int (*go_demote_ok) (struct gfs2_glock *gl); 132 int (*go_demote_ok) (struct gfs2_glock *gl);
134 int (*go_lock) (struct gfs2_holder *gh); 133 int (*go_lock) (struct gfs2_holder *gh);
135 void (*go_unlock) (struct gfs2_holder *gh); 134 void (*go_unlock) (struct gfs2_holder *gh);
135 int (*go_dump)(struct seq_file *seq, const struct gfs2_glock *gl);
136 const int go_type; 136 const int go_type;
137 const unsigned long go_min_hold_time; 137 const unsigned long go_min_hold_time;
138}; 138};
139 139
140enum { 140enum {
141 /* States */ 141 /* States */
142 HIF_HOLDER = 6, 142 HIF_HOLDER = 6, /* Set for gh that "holds" the glock */
143 HIF_FIRST = 7, 143 HIF_FIRST = 7,
144 HIF_ABORTED = 9,
145 HIF_WAIT = 10, 144 HIF_WAIT = 10,
146}; 145};
147 146
@@ -154,20 +153,20 @@ struct gfs2_holder {
154 unsigned gh_flags; 153 unsigned gh_flags;
155 154
156 int gh_error; 155 int gh_error;
157 unsigned long gh_iflags; 156 unsigned long gh_iflags; /* HIF_... */
158 unsigned long gh_ip; 157 unsigned long gh_ip;
159}; 158};
160 159
161enum { 160enum {
162 GLF_LOCK = 1, 161 GLF_LOCK = 1,
163 GLF_STICKY = 2, 162 GLF_STICKY = 2,
164 GLF_DEMOTE = 3, 163 GLF_DEMOTE = 3,
165 GLF_PENDING_DEMOTE = 4, 164 GLF_PENDING_DEMOTE = 4,
166 GLF_DIRTY = 5, 165 GLF_DEMOTE_IN_PROGRESS = 5,
167 GLF_DEMOTE_IN_PROGRESS = 6, 166 GLF_DIRTY = 6,
168 GLF_LFLUSH = 7, 167 GLF_LFLUSH = 7,
169 GLF_WAITERS2 = 8, 168 GLF_INVALIDATE_IN_PROGRESS = 8,
170 GLF_CONV_DEADLK = 9, 169 GLF_REPLY_PENDING = 9,
171}; 170};
172 171
173struct gfs2_glock { 172struct gfs2_glock {
@@ -179,19 +178,14 @@ struct gfs2_glock {
179 spinlock_t gl_spin; 178 spinlock_t gl_spin;
180 179
181 unsigned int gl_state; 180 unsigned int gl_state;
181 unsigned int gl_target;
182 unsigned int gl_reply;
182 unsigned int gl_hash; 183 unsigned int gl_hash;
183 unsigned int gl_demote_state; /* state requested by remote node */ 184 unsigned int gl_demote_state; /* state requested by remote node */
184 unsigned long gl_demote_time; /* time of first demote request */ 185 unsigned long gl_demote_time; /* time of first demote request */
185 struct pid *gl_owner_pid;
186 unsigned long gl_ip;
187 struct list_head gl_holders; 186 struct list_head gl_holders;
188 struct list_head gl_waiters1; /* HIF_MUTEX */
189 struct list_head gl_waiters3; /* HIF_PROMOTE */
190 187
191 const struct gfs2_glock_operations *gl_ops; 188 const struct gfs2_glock_operations *gl_ops;
192
193 struct gfs2_holder *gl_req_gh;
194
195 void *gl_lock; 189 void *gl_lock;
196 char *gl_lvb; 190 char *gl_lvb;
197 atomic_t gl_lvb_count; 191 atomic_t gl_lvb_count;
@@ -427,7 +421,6 @@ struct gfs2_tune {
427 unsigned int gt_quota_quantum; /* Secs between syncs to quota file */ 421 unsigned int gt_quota_quantum; /* Secs between syncs to quota file */
428 unsigned int gt_atime_quantum; /* Min secs between atime updates */ 422 unsigned int gt_atime_quantum; /* Min secs between atime updates */
429 unsigned int gt_new_files_jdata; 423 unsigned int gt_new_files_jdata;
430 unsigned int gt_new_files_directio;
431 unsigned int gt_max_readahead; /* Max bytes to read-ahead from disk */ 424 unsigned int gt_max_readahead; /* Max bytes to read-ahead from disk */
432 unsigned int gt_stall_secs; /* Detects trouble! */ 425 unsigned int gt_stall_secs; /* Detects trouble! */
433 unsigned int gt_complain_secs; 426 unsigned int gt_complain_secs;
@@ -534,7 +527,6 @@ struct gfs2_sbd {
534 struct mutex sd_rindex_mutex; 527 struct mutex sd_rindex_mutex;
535 struct list_head sd_rindex_list; 528 struct list_head sd_rindex_list;
536 struct list_head sd_rindex_mru_list; 529 struct list_head sd_rindex_mru_list;
537 struct list_head sd_rindex_recent_list;
538 struct gfs2_rgrpd *sd_rindex_forward; 530 struct gfs2_rgrpd *sd_rindex_forward;
539 unsigned int sd_rgrps; 531 unsigned int sd_rgrps;
540 532
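A minimal sketch of a glock-operations table under the revised vtable,
assuming the calling conventions visible in this patch; the example names are
invented, and a nonzero return from the new int-valued go_xmote_bh appears to
signal failure to the glock core.

	static int example_go_xmote_bh(struct gfs2_glock *gl,
				       struct gfs2_holder *gh)
	{
		/* post-transition work; nonzero reports an error */
		return 0;
	}

	static int example_go_dump(struct seq_file *seq,
				   const struct gfs2_glock *gl)
	{
		gfs2_print_dbg(seq, " X: n:%llu\n",
			       (unsigned long long)gl->gl_name.ln_number);
		return 0;
	}

	static const struct gfs2_glock_operations example_glops = {
		.go_xmote_bh		= example_go_xmote_bh,
		.go_dump		= example_go_dump,
		.go_type		= LM_TYPE_NONDISK,
		.go_min_hold_time	= HZ / 5,
	};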
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index 09453d057e41..6da0ab355b8a 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -504,7 +504,7 @@ struct inode *gfs2_lookupi(struct inode *dir, const struct qstr *name,
504 } 504 }
505 505
506 if (!is_root) { 506 if (!is_root) {
507 error = permission(dir, MAY_EXEC, NULL); 507 error = gfs2_permission(dir, MAY_EXEC);
508 if (error) 508 if (error)
509 goto out; 509 goto out;
510 } 510 }
@@ -667,7 +667,7 @@ static int create_ok(struct gfs2_inode *dip, const struct qstr *name,
667{ 667{
668 int error; 668 int error;
669 669
670 error = permission(&dip->i_inode, MAY_WRITE | MAY_EXEC, NULL); 670 error = gfs2_permission(&dip->i_inode, MAY_WRITE | MAY_EXEC);
671 if (error) 671 if (error)
672 return error; 672 return error;
673 673
@@ -789,13 +789,8 @@ static void init_dinode(struct gfs2_inode *dip, struct gfs2_glock *gl,
789 if ((dip->i_di.di_flags & GFS2_DIF_INHERIT_JDATA) || 789 if ((dip->i_di.di_flags & GFS2_DIF_INHERIT_JDATA) ||
790 gfs2_tune_get(sdp, gt_new_files_jdata)) 790 gfs2_tune_get(sdp, gt_new_files_jdata))
791 di->di_flags |= cpu_to_be32(GFS2_DIF_JDATA); 791 di->di_flags |= cpu_to_be32(GFS2_DIF_JDATA);
792 if ((dip->i_di.di_flags & GFS2_DIF_INHERIT_DIRECTIO) ||
793 gfs2_tune_get(sdp, gt_new_files_directio))
794 di->di_flags |= cpu_to_be32(GFS2_DIF_DIRECTIO);
795 } else if (S_ISDIR(mode)) { 792 } else if (S_ISDIR(mode)) {
796 di->di_flags |= cpu_to_be32(dip->i_di.di_flags & 793 di->di_flags |= cpu_to_be32(dip->i_di.di_flags &
797 GFS2_DIF_INHERIT_DIRECTIO);
798 di->di_flags |= cpu_to_be32(dip->i_di.di_flags &
799 GFS2_DIF_INHERIT_JDATA); 794 GFS2_DIF_INHERIT_JDATA);
800 } 795 }
801 796
@@ -1134,7 +1129,7 @@ int gfs2_unlink_ok(struct gfs2_inode *dip, const struct qstr *name,
1134 if (IS_APPEND(&dip->i_inode)) 1129 if (IS_APPEND(&dip->i_inode))
1135 return -EPERM; 1130 return -EPERM;
1136 1131
1137 error = permission(&dip->i_inode, MAY_WRITE | MAY_EXEC, NULL); 1132 error = gfs2_permission(&dip->i_inode, MAY_WRITE | MAY_EXEC);
1138 if (error) 1133 if (error)
1139 return error; 1134 return error;
1140 1135
diff --git a/fs/gfs2/inode.h b/fs/gfs2/inode.h
index 580da454b38f..6074c2506f75 100644
--- a/fs/gfs2/inode.h
+++ b/fs/gfs2/inode.h
@@ -72,7 +72,6 @@ static inline void gfs2_inum_out(const struct gfs2_inode *ip,
72} 72}
73 73
74 74
75void gfs2_inode_attr_in(struct gfs2_inode *ip);
76void gfs2_set_iop(struct inode *inode); 75void gfs2_set_iop(struct inode *inode);
77struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned type, 76struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned type,
78 u64 no_addr, u64 no_formal_ino, 77 u64 no_addr, u64 no_formal_ino,
@@ -91,6 +90,7 @@ int gfs2_rmdiri(struct gfs2_inode *dip, const struct qstr *name,
91 struct gfs2_inode *ip); 90 struct gfs2_inode *ip);
92int gfs2_unlink_ok(struct gfs2_inode *dip, const struct qstr *name, 91int gfs2_unlink_ok(struct gfs2_inode *dip, const struct qstr *name,
93 const struct gfs2_inode *ip); 92 const struct gfs2_inode *ip);
93int gfs2_permission(struct inode *inode, int mask);
94int gfs2_ok_to_move(struct gfs2_inode *this, struct gfs2_inode *to); 94int gfs2_ok_to_move(struct gfs2_inode *this, struct gfs2_inode *to);
95int gfs2_readlinki(struct gfs2_inode *ip, char **buf, unsigned int *len); 95int gfs2_readlinki(struct gfs2_inode *ip, char **buf, unsigned int *len);
96int gfs2_glock_nq_atime(struct gfs2_holder *gh); 96int gfs2_glock_nq_atime(struct gfs2_holder *gh);
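The body of gfs2_permission() lives elsewhere in this series; as a hedged
approximation of the shape such a helper takes (not the patch's exact code,
and the real version would also avoid re-taking a glock the caller already
holds):

	int gfs2_permission(struct inode *inode, int mask)
	{
		struct gfs2_inode *ip = GFS2_I(inode);
		struct gfs2_holder gh;
		int error;

		/* shared glock so the inode attributes are current */
		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
		if (error)
			return error;
		error = generic_permission(inode, mask, NULL);
		gfs2_glock_dq_uninit(&gh);
		return error;
	}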
diff --git a/fs/gfs2/locking.c b/fs/gfs2/locking.c
index 663fee728783..523243a13a21 100644
--- a/fs/gfs2/locking.c
+++ b/fs/gfs2/locking.c
@@ -23,12 +23,54 @@ struct lmh_wrapper {
23 const struct lm_lockops *lw_ops; 23 const struct lm_lockops *lw_ops;
24}; 24};
25 25
26static int nolock_mount(char *table_name, char *host_data,
27 lm_callback_t cb, void *cb_data,
28 unsigned int min_lvb_size, int flags,
29 struct lm_lockstruct *lockstruct,
30 struct kobject *fskobj);
31
26/* List of registered low-level locking protocols. A file system selects one 32/* List of registered low-level locking protocols. A file system selects one
27 of them by name at mount time, e.g. lock_nolock, lock_dlm. */ 33 of them by name at mount time, e.g. lock_nolock, lock_dlm. */
28 34
35static const struct lm_lockops nolock_ops = {
36 .lm_proto_name = "lock_nolock",
37 .lm_mount = nolock_mount,
38};
39
40static struct lmh_wrapper nolock_proto = {
41 .lw_list = LIST_HEAD_INIT(nolock_proto.lw_list),
42 .lw_ops = &nolock_ops,
43};
44
29static LIST_HEAD(lmh_list); 45static LIST_HEAD(lmh_list);
30static DEFINE_MUTEX(lmh_lock); 46static DEFINE_MUTEX(lmh_lock);
31 47
48static int nolock_mount(char *table_name, char *host_data,
49 lm_callback_t cb, void *cb_data,
50 unsigned int min_lvb_size, int flags,
51 struct lm_lockstruct *lockstruct,
52 struct kobject *fskobj)
53{
54 char *c;
55 unsigned int jid;
56
57 c = strstr(host_data, "jid=");
58 if (!c)
59 jid = 0;
60 else {
61 c += 4;
62 sscanf(c, "%u", &jid);
63 }
64
65 lockstruct->ls_jid = jid;
66 lockstruct->ls_first = 1;
67 lockstruct->ls_lvb_size = min_lvb_size;
68 lockstruct->ls_ops = &nolock_ops;
69 lockstruct->ls_flags = LM_LSFLAG_LOCAL;
70
71 return 0;
72}
73
32/** 74/**
33 * gfs2_register_lockproto - Register a low-level locking protocol 75 * gfs2_register_lockproto - Register a low-level locking protocol
34 * @proto: the protocol definition 76 * @proto: the protocol definition
@@ -116,9 +158,13 @@ int gfs2_mount_lockproto(char *proto_name, char *table_name, char *host_data,
116 int try = 0; 158 int try = 0;
117 int error, found; 159 int error, found;
118 160
161
119retry: 162retry:
120 mutex_lock(&lmh_lock); 163 mutex_lock(&lmh_lock);
121 164
165 if (list_empty(&nolock_proto.lw_list))
166 list_add(&nolock_proto.lw_list, &lmh_list);
167
122 found = 0; 168 found = 0;
123 list_for_each_entry(lw, &lmh_list, lw_list) { 169 list_for_each_entry(lw, &lmh_list, lw_list) {
124 if (!strcmp(lw->lw_ops->lm_proto_name, proto_name)) { 170 if (!strcmp(lw->lw_ops->lm_proto_name, proto_name)) {
@@ -139,7 +185,8 @@ retry:
139 goto out; 185 goto out;
140 } 186 }
141 187
142 if (!try_module_get(lw->lw_ops->lm_owner)) { 188 if (lw->lw_ops->lm_owner &&
189 !try_module_get(lw->lw_ops->lm_owner)) {
143 try = 0; 190 try = 0;
144 mutex_unlock(&lmh_lock); 191 mutex_unlock(&lmh_lock);
145 msleep(1000); 192 msleep(1000);
@@ -158,7 +205,8 @@ out:
158void gfs2_unmount_lockproto(struct lm_lockstruct *lockstruct) 205void gfs2_unmount_lockproto(struct lm_lockstruct *lockstruct)
159{ 206{
160 mutex_lock(&lmh_lock); 207 mutex_lock(&lmh_lock);
161 lockstruct->ls_ops->lm_unmount(lockstruct->ls_lockspace); 208 if (lockstruct->ls_ops->lm_unmount)
209 lockstruct->ls_ops->lm_unmount(lockstruct->ls_lockspace);
162 if (lockstruct->ls_ops->lm_owner) 210 if (lockstruct->ls_ops->lm_owner)
163 module_put(lockstruct->ls_ops->lm_owner); 211 module_put(lockstruct->ls_ops->lm_owner);
164 mutex_unlock(&lmh_lock); 212 mutex_unlock(&lmh_lock);
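With "nolock" folded into the core, a single-node filesystem no longer needs a
separate locking module; a mount along the following lines (device and mount
point hypothetical) now resolves lock_nolock from the built-in entry added
above:

	mount -t gfs2 -o lockproto=lock_nolock /dev/vg0/test /mnt/gfs2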
diff --git a/fs/gfs2/locking/dlm/lock.c b/fs/gfs2/locking/dlm/lock.c
index cf7ea8abec87..2482c9047505 100644
--- a/fs/gfs2/locking/dlm/lock.c
+++ b/fs/gfs2/locking/dlm/lock.c
@@ -11,46 +11,60 @@
11 11
12static char junk_lvb[GDLM_LVB_SIZE]; 12static char junk_lvb[GDLM_LVB_SIZE];
13 13
14static void queue_complete(struct gdlm_lock *lp) 14
15/* convert dlm lock-mode to gfs lock-state */
16
17static s16 gdlm_make_lmstate(s16 dlmmode)
15{ 18{
16 struct gdlm_ls *ls = lp->ls; 19 switch (dlmmode) {
20 case DLM_LOCK_IV:
21 case DLM_LOCK_NL:
22 return LM_ST_UNLOCKED;
23 case DLM_LOCK_EX:
24 return LM_ST_EXCLUSIVE;
25 case DLM_LOCK_CW:
26 return LM_ST_DEFERRED;
27 case DLM_LOCK_PR:
28 return LM_ST_SHARED;
29 }
30 gdlm_assert(0, "unknown DLM mode %d", dlmmode);
31 return -1;
32}
17 33
18 clear_bit(LFL_ACTIVE, &lp->flags); 34/* A lock placed on this queue is re-submitted to DLM as soon as the lock_dlm
35 thread gets to it. */
36
37static void queue_submit(struct gdlm_lock *lp)
38{
39 struct gdlm_ls *ls = lp->ls;
19 40
20 spin_lock(&ls->async_lock); 41 spin_lock(&ls->async_lock);
21 list_add_tail(&lp->clist, &ls->complete); 42 list_add_tail(&lp->delay_list, &ls->submit);
22 spin_unlock(&ls->async_lock); 43 spin_unlock(&ls->async_lock);
23 wake_up(&ls->thread_wait); 44 wake_up(&ls->thread_wait);
24} 45}
25 46
26static inline void gdlm_ast(void *astarg) 47static void wake_up_ast(struct gdlm_lock *lp)
27{ 48{
28 queue_complete(astarg); 49 clear_bit(LFL_AST_WAIT, &lp->flags);
50 smp_mb__after_clear_bit();
51 wake_up_bit(&lp->flags, LFL_AST_WAIT);
29} 52}
30 53
31static inline void gdlm_bast(void *astarg, int mode) 54static void gdlm_delete_lp(struct gdlm_lock *lp)
32{ 55{
33 struct gdlm_lock *lp = astarg;
34 struct gdlm_ls *ls = lp->ls; 56 struct gdlm_ls *ls = lp->ls;
35 57
36 if (!mode) {
37 printk(KERN_INFO "lock_dlm: bast mode zero %x,%llx\n",
38 lp->lockname.ln_type,
39 (unsigned long long)lp->lockname.ln_number);
40 return;
41 }
42
43 spin_lock(&ls->async_lock); 58 spin_lock(&ls->async_lock);
44 if (!lp->bast_mode) { 59 if (!list_empty(&lp->delay_list))
45 list_add_tail(&lp->blist, &ls->blocking); 60 list_del_init(&lp->delay_list);
46 lp->bast_mode = mode; 61 ls->all_locks_count--;
47 } else if (lp->bast_mode < mode)
48 lp->bast_mode = mode;
49 spin_unlock(&ls->async_lock); 62 spin_unlock(&ls->async_lock);
50 wake_up(&ls->thread_wait); 63
64 kfree(lp);
51} 65}
52 66
53void gdlm_queue_delayed(struct gdlm_lock *lp) 67static void gdlm_queue_delayed(struct gdlm_lock *lp)
54{ 68{
55 struct gdlm_ls *ls = lp->ls; 69 struct gdlm_ls *ls = lp->ls;
56 70
@@ -59,6 +73,236 @@ void gdlm_queue_delayed(struct gdlm_lock *lp)
59 spin_unlock(&ls->async_lock); 73 spin_unlock(&ls->async_lock);
60} 74}
61 75
76static void process_complete(struct gdlm_lock *lp)
77{
78 struct gdlm_ls *ls = lp->ls;
79 struct lm_async_cb acb;
80
81 memset(&acb, 0, sizeof(acb));
82
83 if (lp->lksb.sb_status == -DLM_ECANCEL) {
84 log_info("complete dlm cancel %x,%llx flags %lx",
85 lp->lockname.ln_type,
86 (unsigned long long)lp->lockname.ln_number,
87 lp->flags);
88
89 lp->req = lp->cur;
90 acb.lc_ret |= LM_OUT_CANCELED;
91 if (lp->cur == DLM_LOCK_IV)
92 lp->lksb.sb_lkid = 0;
93 goto out;
94 }
95
96 if (test_and_clear_bit(LFL_DLM_UNLOCK, &lp->flags)) {
97 if (lp->lksb.sb_status != -DLM_EUNLOCK) {
98 log_info("unlock sb_status %d %x,%llx flags %lx",
99 lp->lksb.sb_status, lp->lockname.ln_type,
100 (unsigned long long)lp->lockname.ln_number,
101 lp->flags);
102 return;
103 }
104
105 lp->cur = DLM_LOCK_IV;
106 lp->req = DLM_LOCK_IV;
107 lp->lksb.sb_lkid = 0;
108
109 if (test_and_clear_bit(LFL_UNLOCK_DELETE, &lp->flags)) {
110 gdlm_delete_lp(lp);
111 return;
112 }
113 goto out;
114 }
115
116 if (lp->lksb.sb_flags & DLM_SBF_VALNOTVALID)
117 memset(lp->lksb.sb_lvbptr, 0, GDLM_LVB_SIZE);
118
119 if (lp->lksb.sb_flags & DLM_SBF_ALTMODE) {
120 if (lp->req == DLM_LOCK_PR)
121 lp->req = DLM_LOCK_CW;
122 else if (lp->req == DLM_LOCK_CW)
123 lp->req = DLM_LOCK_PR;
124 }
125
126 /*
127 * A canceled lock request. The lock was just taken off the delayed
128 * list and was never even submitted to dlm.
129 */
130
131 if (test_and_clear_bit(LFL_CANCEL, &lp->flags)) {
132 log_info("complete internal cancel %x,%llx",
133 lp->lockname.ln_type,
134 (unsigned long long)lp->lockname.ln_number);
135 lp->req = lp->cur;
136 acb.lc_ret |= LM_OUT_CANCELED;
137 goto out;
138 }
139
140 /*
 141 * An error occurred.
142 */
143
144 if (lp->lksb.sb_status) {
145 /* a "normal" error */
146 if ((lp->lksb.sb_status == -EAGAIN) &&
147 (lp->lkf & DLM_LKF_NOQUEUE)) {
148 lp->req = lp->cur;
149 if (lp->cur == DLM_LOCK_IV)
150 lp->lksb.sb_lkid = 0;
151 goto out;
152 }
153
154 /* this could only happen with cancels I think */
155 log_info("ast sb_status %d %x,%llx flags %lx",
156 lp->lksb.sb_status, lp->lockname.ln_type,
157 (unsigned long long)lp->lockname.ln_number,
158 lp->flags);
159 return;
160 }
161
162 /*
163 * This is an AST for an EX->EX conversion for sync_lvb from GFS.
164 */
165
166 if (test_and_clear_bit(LFL_SYNC_LVB, &lp->flags)) {
167 wake_up_ast(lp);
168 return;
169 }
170
171 /*
172 * A lock has been demoted to NL because it initially completed during
173 * BLOCK_LOCKS. Now it must be requested in the originally requested
174 * mode.
175 */
176
177 if (test_and_clear_bit(LFL_REREQUEST, &lp->flags)) {
178 gdlm_assert(lp->req == DLM_LOCK_NL, "%x,%llx",
179 lp->lockname.ln_type,
180 (unsigned long long)lp->lockname.ln_number);
181 gdlm_assert(lp->prev_req > DLM_LOCK_NL, "%x,%llx",
182 lp->lockname.ln_type,
183 (unsigned long long)lp->lockname.ln_number);
184
185 lp->cur = DLM_LOCK_NL;
186 lp->req = lp->prev_req;
187 lp->prev_req = DLM_LOCK_IV;
188 lp->lkf &= ~DLM_LKF_CONVDEADLK;
189
190 set_bit(LFL_NOCACHE, &lp->flags);
191
192 if (test_bit(DFL_BLOCK_LOCKS, &ls->flags) &&
193 !test_bit(LFL_NOBLOCK, &lp->flags))
194 gdlm_queue_delayed(lp);
195 else
196 queue_submit(lp);
197 return;
198 }
199
200 /*
201 * A request is granted during dlm recovery. It may be granted
202 * because the locks of a failed node were cleared. In that case,
203 * there may be inconsistent data beneath this lock and we must wait
204 * for recovery to complete to use it. When gfs recovery is done this
205 * granted lock will be converted to NL and then reacquired in this
206 * granted state.
207 */
208
209 if (test_bit(DFL_BLOCK_LOCKS, &ls->flags) &&
210 !test_bit(LFL_NOBLOCK, &lp->flags) &&
211 lp->req != DLM_LOCK_NL) {
212
213 lp->cur = lp->req;
214 lp->prev_req = lp->req;
215 lp->req = DLM_LOCK_NL;
216 lp->lkf |= DLM_LKF_CONVERT;
217 lp->lkf &= ~DLM_LKF_CONVDEADLK;
218
219 log_debug("rereq %x,%llx id %x %d,%d",
220 lp->lockname.ln_type,
221 (unsigned long long)lp->lockname.ln_number,
222 lp->lksb.sb_lkid, lp->cur, lp->req);
223
224 set_bit(LFL_REREQUEST, &lp->flags);
225 queue_submit(lp);
226 return;
227 }
228
229 /*
230 * DLM demoted the lock to NL before it was granted so GFS must be
231 * told it cannot cache data for this lock.
232 */
233
234 if (lp->lksb.sb_flags & DLM_SBF_DEMOTED)
235 set_bit(LFL_NOCACHE, &lp->flags);
236
237out:
238 /*
239 * This is an internal lock_dlm lock
240 */
241
242 if (test_bit(LFL_INLOCK, &lp->flags)) {
243 clear_bit(LFL_NOBLOCK, &lp->flags);
244 lp->cur = lp->req;
245 wake_up_ast(lp);
246 return;
247 }
248
249 /*
250 * Normal completion of a lock request. Tell GFS it now has the lock.
251 */
252
253 clear_bit(LFL_NOBLOCK, &lp->flags);
254 lp->cur = lp->req;
255
256 acb.lc_name = lp->lockname;
257 acb.lc_ret |= gdlm_make_lmstate(lp->cur);
258
259 ls->fscb(ls->sdp, LM_CB_ASYNC, &acb);
260}
261
262static void gdlm_ast(void *astarg)
263{
264 struct gdlm_lock *lp = astarg;
265 clear_bit(LFL_ACTIVE, &lp->flags);
266 process_complete(lp);
267}
268
269static void process_blocking(struct gdlm_lock *lp, int bast_mode)
270{
271 struct gdlm_ls *ls = lp->ls;
272 unsigned int cb = 0;
273
274 switch (gdlm_make_lmstate(bast_mode)) {
275 case LM_ST_EXCLUSIVE:
276 cb = LM_CB_NEED_E;
277 break;
278 case LM_ST_DEFERRED:
279 cb = LM_CB_NEED_D;
280 break;
281 case LM_ST_SHARED:
282 cb = LM_CB_NEED_S;
283 break;
284 default:
285 gdlm_assert(0, "unknown bast mode %u", bast_mode);
286 }
287
288 ls->fscb(ls->sdp, cb, &lp->lockname);
289}
290
291
292static void gdlm_bast(void *astarg, int mode)
293{
294 struct gdlm_lock *lp = astarg;
295
296 if (!mode) {
297 printk(KERN_INFO "lock_dlm: bast mode zero %x,%llx\n",
298 lp->lockname.ln_type,
299 (unsigned long long)lp->lockname.ln_number);
300 return;
301 }
302
303 process_blocking(lp, mode);
304}
305
62/* convert gfs lock-state to dlm lock-mode */ 306/* convert gfs lock-state to dlm lock-mode */
63 307
64static s16 make_mode(s16 lmstate) 308static s16 make_mode(s16 lmstate)
@@ -77,24 +321,6 @@ static s16 make_mode(s16 lmstate)
77 return -1; 321 return -1;
78} 322}
79 323
80/* convert dlm lock-mode to gfs lock-state */
81
82s16 gdlm_make_lmstate(s16 dlmmode)
83{
84 switch (dlmmode) {
85 case DLM_LOCK_IV:
86 case DLM_LOCK_NL:
87 return LM_ST_UNLOCKED;
88 case DLM_LOCK_EX:
89 return LM_ST_EXCLUSIVE;
90 case DLM_LOCK_CW:
91 return LM_ST_DEFERRED;
92 case DLM_LOCK_PR:
93 return LM_ST_SHARED;
94 }
95 gdlm_assert(0, "unknown DLM mode %d", dlmmode);
96 return -1;
97}
98 324
99/* verify agreement with GFS on the current lock state, NB: DLM_LOCK_NL and 325/* verify agreement with GFS on the current lock state, NB: DLM_LOCK_NL and
100 DLM_LOCK_IV are both considered LM_ST_UNLOCKED by GFS. */ 326 DLM_LOCK_IV are both considered LM_ST_UNLOCKED by GFS. */
@@ -134,14 +360,6 @@ static inline unsigned int make_flags(struct gdlm_lock *lp,
134 360
135 if (lp->lksb.sb_lkid != 0) { 361 if (lp->lksb.sb_lkid != 0) {
136 lkf |= DLM_LKF_CONVERT; 362 lkf |= DLM_LKF_CONVERT;
137
138 /* Conversion deadlock avoidance by DLM */
139
140 if (!(lp->ls->fsflags & LM_MFLAG_CONV_NODROP) &&
141 !test_bit(LFL_FORCE_PROMOTE, &lp->flags) &&
142 !(lkf & DLM_LKF_NOQUEUE) &&
143 cur > DLM_LOCK_NL && req > DLM_LOCK_NL && cur != req)
144 lkf |= DLM_LKF_CONVDEADLK;
145 } 363 }
146 364
147 if (lp->lvb) 365 if (lp->lvb)
@@ -173,14 +391,9 @@ static int gdlm_create_lp(struct gdlm_ls *ls, struct lm_lockname *name,
173 make_strname(name, &lp->strname); 391 make_strname(name, &lp->strname);
174 lp->ls = ls; 392 lp->ls = ls;
175 lp->cur = DLM_LOCK_IV; 393 lp->cur = DLM_LOCK_IV;
176 lp->lvb = NULL;
177 lp->hold_null = NULL;
178 INIT_LIST_HEAD(&lp->clist);
179 INIT_LIST_HEAD(&lp->blist);
180 INIT_LIST_HEAD(&lp->delay_list); 394 INIT_LIST_HEAD(&lp->delay_list);
181 395
182 spin_lock(&ls->async_lock); 396 spin_lock(&ls->async_lock);
183 list_add(&lp->all_list, &ls->all_locks);
184 ls->all_locks_count++; 397 ls->all_locks_count++;
185 spin_unlock(&ls->async_lock); 398 spin_unlock(&ls->async_lock);
186 399
@@ -188,26 +401,6 @@ static int gdlm_create_lp(struct gdlm_ls *ls, struct lm_lockname *name,
188 return 0; 401 return 0;
189} 402}
190 403
191void gdlm_delete_lp(struct gdlm_lock *lp)
192{
193 struct gdlm_ls *ls = lp->ls;
194
195 spin_lock(&ls->async_lock);
196 if (!list_empty(&lp->clist))
197 list_del_init(&lp->clist);
198 if (!list_empty(&lp->blist))
199 list_del_init(&lp->blist);
200 if (!list_empty(&lp->delay_list))
201 list_del_init(&lp->delay_list);
202 gdlm_assert(!list_empty(&lp->all_list), "%x,%llx", lp->lockname.ln_type,
203 (unsigned long long)lp->lockname.ln_number);
204 list_del_init(&lp->all_list);
205 ls->all_locks_count--;
206 spin_unlock(&ls->async_lock);
207
208 kfree(lp);
209}
210
211int gdlm_get_lock(void *lockspace, struct lm_lockname *name, 404int gdlm_get_lock(void *lockspace, struct lm_lockname *name,
212 void **lockp) 405 void **lockp)
213{ 406{
@@ -261,7 +454,7 @@ unsigned int gdlm_do_lock(struct gdlm_lock *lp)
261 454
262 if ((error == -EAGAIN) && (lp->lkf & DLM_LKF_NOQUEUE)) { 455 if ((error == -EAGAIN) && (lp->lkf & DLM_LKF_NOQUEUE)) {
263 lp->lksb.sb_status = -EAGAIN; 456 lp->lksb.sb_status = -EAGAIN;
264 queue_complete(lp); 457 gdlm_ast(lp);
265 error = 0; 458 error = 0;
266 } 459 }
267 460
@@ -308,6 +501,12 @@ unsigned int gdlm_lock(void *lock, unsigned int cur_state,
308{ 501{
309 struct gdlm_lock *lp = lock; 502 struct gdlm_lock *lp = lock;
310 503
504 if (req_state == LM_ST_UNLOCKED)
505 return gdlm_unlock(lock, cur_state);
506
311 clear_bit(LFL_DLM_CANCEL, &lp->flags); 510 clear_bit(LFL_DLM_CANCEL, &lp->flags);
312 if (flags & LM_FLAG_NOEXP) 511 if (flags & LM_FLAG_NOEXP)
313 set_bit(LFL_NOBLOCK, &lp->flags); 512 set_bit(LFL_NOBLOCK, &lp->flags);
@@ -351,7 +550,7 @@ void gdlm_cancel(void *lock)
351 if (delay_list) { 550 if (delay_list) {
352 set_bit(LFL_CANCEL, &lp->flags); 551 set_bit(LFL_CANCEL, &lp->flags);
353 set_bit(LFL_ACTIVE, &lp->flags); 552 set_bit(LFL_ACTIVE, &lp->flags);
354 queue_complete(lp); 553 gdlm_ast(lp);
355 return; 554 return;
356 } 555 }
357 556
@@ -507,22 +706,3 @@ void gdlm_submit_delayed(struct gdlm_ls *ls)
507 wake_up(&ls->thread_wait); 706 wake_up(&ls->thread_wait);
508} 707}
509 708
510int gdlm_release_all_locks(struct gdlm_ls *ls)
511{
512 struct gdlm_lock *lp, *safe;
513 int count = 0;
514
515 spin_lock(&ls->async_lock);
516 list_for_each_entry_safe(lp, safe, &ls->all_locks, all_list) {
517 list_del_init(&lp->all_list);
518
519 if (lp->lvb && lp->lvb != junk_lvb)
520 kfree(lp->lvb);
521 kfree(lp);
522 count++;
523 }
524 spin_unlock(&ls->async_lock);
525
526 return count;
527}
528
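The wake_up_ast()/LFL_AST_WAIT pairing above is one half of a bit-wait
handshake; a hedged sketch of the sleeping side, using the wait_on_bit() API
of this era (helper names are illustrative):

	static int example_ast_wait(void *word)
	{
		schedule();	/* uninterruptible: sleep until woken */
		return 0;
	}

	static void example_wait_for_ast(struct gdlm_lock *lp)
	{
		/* returns once wake_up_ast() clears LFL_AST_WAIT */
		wait_on_bit(&lp->flags, LFL_AST_WAIT, example_ast_wait,
			    TASK_UNINTERRUPTIBLE);
	}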
diff --git a/fs/gfs2/locking/dlm/lock_dlm.h b/fs/gfs2/locking/dlm/lock_dlm.h
index a243cf69c54e..3c98e7c6f93b 100644
--- a/fs/gfs2/locking/dlm/lock_dlm.h
+++ b/fs/gfs2/locking/dlm/lock_dlm.h
@@ -72,19 +72,12 @@ struct gdlm_ls {
72 int recover_jid_done; 72 int recover_jid_done;
73 int recover_jid_status; 73 int recover_jid_status;
74 spinlock_t async_lock; 74 spinlock_t async_lock;
75 struct list_head complete;
76 struct list_head blocking;
77 struct list_head delayed; 75 struct list_head delayed;
78 struct list_head submit; 76 struct list_head submit;
79 struct list_head all_locks;
80 u32 all_locks_count; 77 u32 all_locks_count;
81 wait_queue_head_t wait_control; 78 wait_queue_head_t wait_control;
82 struct task_struct *thread1; 79 struct task_struct *thread;
83 struct task_struct *thread2;
84 wait_queue_head_t thread_wait; 80 wait_queue_head_t thread_wait;
85 unsigned long drop_time;
86 int drop_locks_count;
87 int drop_locks_period;
88}; 81};
89 82
90enum { 83enum {
@@ -117,12 +110,7 @@ struct gdlm_lock {
117 u32 lkf; /* dlm flags DLM_LKF_ */ 110 u32 lkf; /* dlm flags DLM_LKF_ */
118 unsigned long flags; /* lock_dlm flags LFL_ */ 111 unsigned long flags; /* lock_dlm flags LFL_ */
119 112
120 int bast_mode; /* protected by async_lock */
121
122 struct list_head clist; /* complete */
123 struct list_head blist; /* blocking */
124 struct list_head delay_list; /* delayed */ 113 struct list_head delay_list; /* delayed */
125 struct list_head all_list; /* all locks for the fs */
126 struct gdlm_lock *hold_null; /* NL lock for hold_lvb */ 114 struct gdlm_lock *hold_null; /* NL lock for hold_lvb */
127}; 115};
128 116
@@ -159,11 +147,7 @@ void gdlm_release_threads(struct gdlm_ls *);
159 147
160/* lock.c */ 148/* lock.c */
161 149
162s16 gdlm_make_lmstate(s16);
163void gdlm_queue_delayed(struct gdlm_lock *);
164void gdlm_submit_delayed(struct gdlm_ls *); 150void gdlm_submit_delayed(struct gdlm_ls *);
165int gdlm_release_all_locks(struct gdlm_ls *);
166void gdlm_delete_lp(struct gdlm_lock *);
167unsigned int gdlm_do_lock(struct gdlm_lock *); 151unsigned int gdlm_do_lock(struct gdlm_lock *);
168 152
169int gdlm_get_lock(void *, struct lm_lockname *, void **); 153int gdlm_get_lock(void *, struct lm_lockname *, void **);
diff --git a/fs/gfs2/locking/dlm/mount.c b/fs/gfs2/locking/dlm/mount.c
index 470bdf650b50..09d78c216f48 100644
--- a/fs/gfs2/locking/dlm/mount.c
+++ b/fs/gfs2/locking/dlm/mount.c
@@ -22,22 +22,14 @@ static struct gdlm_ls *init_gdlm(lm_callback_t cb, struct gfs2_sbd *sdp,
22 if (!ls) 22 if (!ls)
23 return NULL; 23 return NULL;
24 24
25 ls->drop_locks_count = GDLM_DROP_COUNT;
26 ls->drop_locks_period = GDLM_DROP_PERIOD;
27 ls->fscb = cb; 25 ls->fscb = cb;
28 ls->sdp = sdp; 26 ls->sdp = sdp;
29 ls->fsflags = flags; 27 ls->fsflags = flags;
30 spin_lock_init(&ls->async_lock); 28 spin_lock_init(&ls->async_lock);
31 INIT_LIST_HEAD(&ls->complete);
32 INIT_LIST_HEAD(&ls->blocking);
33 INIT_LIST_HEAD(&ls->delayed); 29 INIT_LIST_HEAD(&ls->delayed);
34 INIT_LIST_HEAD(&ls->submit); 30 INIT_LIST_HEAD(&ls->submit);
35 INIT_LIST_HEAD(&ls->all_locks);
36 init_waitqueue_head(&ls->thread_wait); 31 init_waitqueue_head(&ls->thread_wait);
37 init_waitqueue_head(&ls->wait_control); 32 init_waitqueue_head(&ls->wait_control);
38 ls->thread1 = NULL;
39 ls->thread2 = NULL;
40 ls->drop_time = jiffies;
41 ls->jid = -1; 33 ls->jid = -1;
42 34
43 strncpy(buf, table_name, 256); 35 strncpy(buf, table_name, 256);
@@ -180,7 +172,6 @@ out:
180static void gdlm_unmount(void *lockspace) 172static void gdlm_unmount(void *lockspace)
181{ 173{
182 struct gdlm_ls *ls = lockspace; 174 struct gdlm_ls *ls = lockspace;
183 int rv;
184 175
185 log_debug("unmount flags %lx", ls->flags); 176 log_debug("unmount flags %lx", ls->flags);
186 177
@@ -194,9 +185,7 @@ static void gdlm_unmount(void *lockspace)
194 gdlm_kobject_release(ls); 185 gdlm_kobject_release(ls);
195 dlm_release_lockspace(ls->dlm_lockspace, 2); 186 dlm_release_lockspace(ls->dlm_lockspace, 2);
196 gdlm_release_threads(ls); 187 gdlm_release_threads(ls);
197 rv = gdlm_release_all_locks(ls); 188 BUG_ON(ls->all_locks_count);
198 if (rv)
199 log_info("gdlm_unmount: %d stray locks freed", rv);
200out: 189out:
201 kfree(ls); 190 kfree(ls);
202} 191}
@@ -232,7 +221,6 @@ static void gdlm_withdraw(void *lockspace)
232 221
233 dlm_release_lockspace(ls->dlm_lockspace, 2); 222 dlm_release_lockspace(ls->dlm_lockspace, 2);
234 gdlm_release_threads(ls); 223 gdlm_release_threads(ls);
235 gdlm_release_all_locks(ls);
236 gdlm_kobject_release(ls); 224 gdlm_kobject_release(ls);
237} 225}
238 226
diff --git a/fs/gfs2/locking/dlm/sysfs.c b/fs/gfs2/locking/dlm/sysfs.c
index a4ff271df9ee..4ec571c3d8a9 100644
--- a/fs/gfs2/locking/dlm/sysfs.c
+++ b/fs/gfs2/locking/dlm/sysfs.c
@@ -114,17 +114,6 @@ static ssize_t recover_status_show(struct gdlm_ls *ls, char *buf)
114 return sprintf(buf, "%d\n", ls->recover_jid_status); 114 return sprintf(buf, "%d\n", ls->recover_jid_status);
115} 115}
116 116
117static ssize_t drop_count_show(struct gdlm_ls *ls, char *buf)
118{
119 return sprintf(buf, "%d\n", ls->drop_locks_count);
120}
121
122static ssize_t drop_count_store(struct gdlm_ls *ls, const char *buf, size_t len)
123{
124 ls->drop_locks_count = simple_strtol(buf, NULL, 0);
125 return len;
126}
127
128struct gdlm_attr { 117struct gdlm_attr {
129 struct attribute attr; 118 struct attribute attr;
130 ssize_t (*show)(struct gdlm_ls *, char *); 119 ssize_t (*show)(struct gdlm_ls *, char *);
@@ -144,7 +133,6 @@ GDLM_ATTR(first_done, 0444, first_done_show, NULL);
144GDLM_ATTR(recover, 0644, recover_show, recover_store); 133GDLM_ATTR(recover, 0644, recover_show, recover_store);
145GDLM_ATTR(recover_done, 0444, recover_done_show, NULL); 134GDLM_ATTR(recover_done, 0444, recover_done_show, NULL);
146GDLM_ATTR(recover_status, 0444, recover_status_show, NULL); 135GDLM_ATTR(recover_status, 0444, recover_status_show, NULL);
147GDLM_ATTR(drop_count, 0644, drop_count_show, drop_count_store);
148 136
149static struct attribute *gdlm_attrs[] = { 137static struct attribute *gdlm_attrs[] = {
150 &gdlm_attr_proto_name.attr, 138 &gdlm_attr_proto_name.attr,
@@ -157,7 +145,6 @@ static struct attribute *gdlm_attrs[] = {
157 &gdlm_attr_recover.attr, 145 &gdlm_attr_recover.attr,
158 &gdlm_attr_recover_done.attr, 146 &gdlm_attr_recover_done.attr,
159 &gdlm_attr_recover_status.attr, 147 &gdlm_attr_recover_status.attr,
160 &gdlm_attr_drop_count.attr,
161 NULL, 148 NULL,
162}; 149};
163 150
diff --git a/fs/gfs2/locking/dlm/thread.c b/fs/gfs2/locking/dlm/thread.c
index e53db6fd28ab..38823efd698c 100644
--- a/fs/gfs2/locking/dlm/thread.c
+++ b/fs/gfs2/locking/dlm/thread.c
@@ -9,367 +9,60 @@
9 9
10#include "lock_dlm.h" 10#include "lock_dlm.h"
11 11
12/* A lock placed on this queue is re-submitted to DLM as soon as the lock_dlm 12static inline int no_work(struct gdlm_ls *ls)
13 thread gets to it. */
14
15static void queue_submit(struct gdlm_lock *lp)
16{
17 struct gdlm_ls *ls = lp->ls;
18
19 spin_lock(&ls->async_lock);
20 list_add_tail(&lp->delay_list, &ls->submit);
21 spin_unlock(&ls->async_lock);
22 wake_up(&ls->thread_wait);
23}
24
25static void process_blocking(struct gdlm_lock *lp, int bast_mode)
26{
27 struct gdlm_ls *ls = lp->ls;
28 unsigned int cb = 0;
29
30 switch (gdlm_make_lmstate(bast_mode)) {
31 case LM_ST_EXCLUSIVE:
32 cb = LM_CB_NEED_E;
33 break;
34 case LM_ST_DEFERRED:
35 cb = LM_CB_NEED_D;
36 break;
37 case LM_ST_SHARED:
38 cb = LM_CB_NEED_S;
39 break;
40 default:
41 gdlm_assert(0, "unknown bast mode %u", lp->bast_mode);
42 }
43
44 ls->fscb(ls->sdp, cb, &lp->lockname);
45}
46
47static void wake_up_ast(struct gdlm_lock *lp)
48{
49 clear_bit(LFL_AST_WAIT, &lp->flags);
50 smp_mb__after_clear_bit();
51 wake_up_bit(&lp->flags, LFL_AST_WAIT);
52}
53
54static void process_complete(struct gdlm_lock *lp)
55{
56 struct gdlm_ls *ls = lp->ls;
57 struct lm_async_cb acb;
58 s16 prev_mode = lp->cur;
59
60 memset(&acb, 0, sizeof(acb));
61
62 if (lp->lksb.sb_status == -DLM_ECANCEL) {
63 log_info("complete dlm cancel %x,%llx flags %lx",
64 lp->lockname.ln_type,
65 (unsigned long long)lp->lockname.ln_number,
66 lp->flags);
67
68 lp->req = lp->cur;
69 acb.lc_ret |= LM_OUT_CANCELED;
70 if (lp->cur == DLM_LOCK_IV)
71 lp->lksb.sb_lkid = 0;
72 goto out;
73 }
74
75 if (test_and_clear_bit(LFL_DLM_UNLOCK, &lp->flags)) {
76 if (lp->lksb.sb_status != -DLM_EUNLOCK) {
77 log_info("unlock sb_status %d %x,%llx flags %lx",
78 lp->lksb.sb_status, lp->lockname.ln_type,
79 (unsigned long long)lp->lockname.ln_number,
80 lp->flags);
81 return;
82 }
83
84 lp->cur = DLM_LOCK_IV;
85 lp->req = DLM_LOCK_IV;
86 lp->lksb.sb_lkid = 0;
87
88 if (test_and_clear_bit(LFL_UNLOCK_DELETE, &lp->flags)) {
89 gdlm_delete_lp(lp);
90 return;
91 }
92 goto out;
93 }
94
95 if (lp->lksb.sb_flags & DLM_SBF_VALNOTVALID)
96 memset(lp->lksb.sb_lvbptr, 0, GDLM_LVB_SIZE);
97
98 if (lp->lksb.sb_flags & DLM_SBF_ALTMODE) {
99 if (lp->req == DLM_LOCK_PR)
100 lp->req = DLM_LOCK_CW;
101 else if (lp->req == DLM_LOCK_CW)
102 lp->req = DLM_LOCK_PR;
103 }
104
105 /*
106 * A canceled lock request. The lock was just taken off the delayed
107 * list and was never even submitted to dlm.
108 */
109
110 if (test_and_clear_bit(LFL_CANCEL, &lp->flags)) {
111 log_info("complete internal cancel %x,%llx",
112 lp->lockname.ln_type,
113 (unsigned long long)lp->lockname.ln_number);
114 lp->req = lp->cur;
115 acb.lc_ret |= LM_OUT_CANCELED;
116 goto out;
117 }
118
119 /*
120 * An error occurred.
121 */
122
123 if (lp->lksb.sb_status) {
124 /* a "normal" error */
125 if ((lp->lksb.sb_status == -EAGAIN) &&
126 (lp->lkf & DLM_LKF_NOQUEUE)) {
127 lp->req = lp->cur;
128 if (lp->cur == DLM_LOCK_IV)
129 lp->lksb.sb_lkid = 0;
130 goto out;
131 }
132
133 /* this could only happen with cancels I think */
134 log_info("ast sb_status %d %x,%llx flags %lx",
135 lp->lksb.sb_status, lp->lockname.ln_type,
136 (unsigned long long)lp->lockname.ln_number,
137 lp->flags);
138 if (lp->lksb.sb_status == -EDEADLOCK &&
139 lp->ls->fsflags & LM_MFLAG_CONV_NODROP) {
140 lp->req = lp->cur;
141 acb.lc_ret |= LM_OUT_CONV_DEADLK;
142 if (lp->cur == DLM_LOCK_IV)
143 lp->lksb.sb_lkid = 0;
144 goto out;
145 } else
146 return;
147 }
148
149 /*
150 * This is an AST for an EX->EX conversion for sync_lvb from GFS.
151 */
152
153 if (test_and_clear_bit(LFL_SYNC_LVB, &lp->flags)) {
154 wake_up_ast(lp);
155 return;
156 }
157
158 /*
159 * A lock has been demoted to NL because it initially completed during
160 * BLOCK_LOCKS. Now it must be requested in the originally requested
161 * mode.
162 */
163
164 if (test_and_clear_bit(LFL_REREQUEST, &lp->flags)) {
165 gdlm_assert(lp->req == DLM_LOCK_NL, "%x,%llx",
166 lp->lockname.ln_type,
167 (unsigned long long)lp->lockname.ln_number);
168 gdlm_assert(lp->prev_req > DLM_LOCK_NL, "%x,%llx",
169 lp->lockname.ln_type,
170 (unsigned long long)lp->lockname.ln_number);
171
172 lp->cur = DLM_LOCK_NL;
173 lp->req = lp->prev_req;
174 lp->prev_req = DLM_LOCK_IV;
175 lp->lkf &= ~DLM_LKF_CONVDEADLK;
176
177 set_bit(LFL_NOCACHE, &lp->flags);
178
179 if (test_bit(DFL_BLOCK_LOCKS, &ls->flags) &&
180 !test_bit(LFL_NOBLOCK, &lp->flags))
181 gdlm_queue_delayed(lp);
182 else
183 queue_submit(lp);
184 return;
185 }
186
187 /*
188 * A request is granted during dlm recovery. It may be granted
189 * because the locks of a failed node were cleared. In that case,
190 * there may be inconsistent data beneath this lock and we must wait
191 * for recovery to complete to use it. When gfs recovery is done this
192 * granted lock will be converted to NL and then reacquired in this
193 * granted state.
194 */
195
196 if (test_bit(DFL_BLOCK_LOCKS, &ls->flags) &&
197 !test_bit(LFL_NOBLOCK, &lp->flags) &&
198 lp->req != DLM_LOCK_NL) {
199
200 lp->cur = lp->req;
201 lp->prev_req = lp->req;
202 lp->req = DLM_LOCK_NL;
203 lp->lkf |= DLM_LKF_CONVERT;
204 lp->lkf &= ~DLM_LKF_CONVDEADLK;
205
206 log_debug("rereq %x,%llx id %x %d,%d",
207 lp->lockname.ln_type,
208 (unsigned long long)lp->lockname.ln_number,
209 lp->lksb.sb_lkid, lp->cur, lp->req);
210
211 set_bit(LFL_REREQUEST, &lp->flags);
212 queue_submit(lp);
213 return;
214 }
215
216 /*
217 * DLM demoted the lock to NL before it was granted so GFS must be
218 * told it cannot cache data for this lock.
219 */
220
221 if (lp->lksb.sb_flags & DLM_SBF_DEMOTED)
222 set_bit(LFL_NOCACHE, &lp->flags);
223
224out:
225 /*
226 * This is an internal lock_dlm lock
227 */
228
229 if (test_bit(LFL_INLOCK, &lp->flags)) {
230 clear_bit(LFL_NOBLOCK, &lp->flags);
231 lp->cur = lp->req;
232 wake_up_ast(lp);
233 return;
234 }
235
236 /*
237 * Normal completion of a lock request. Tell GFS it now has the lock.
238 */
239
240 clear_bit(LFL_NOBLOCK, &lp->flags);
241 lp->cur = lp->req;
242
243 acb.lc_name = lp->lockname;
244 acb.lc_ret |= gdlm_make_lmstate(lp->cur);
245
246 if (!test_and_clear_bit(LFL_NOCACHE, &lp->flags) &&
247 (lp->cur > DLM_LOCK_NL) && (prev_mode > DLM_LOCK_NL))
248 acb.lc_ret |= LM_OUT_CACHEABLE;
249
250 ls->fscb(ls->sdp, LM_CB_ASYNC, &acb);
251}
252
253static inline int no_work(struct gdlm_ls *ls, int blocking)
254{ 13{
255 int ret; 14 int ret;
256 15
257 spin_lock(&ls->async_lock); 16 spin_lock(&ls->async_lock);
258 ret = list_empty(&ls->complete) && list_empty(&ls->submit); 17 ret = list_empty(&ls->submit);
259 if (ret && blocking)
260 ret = list_empty(&ls->blocking);
261 spin_unlock(&ls->async_lock); 18 spin_unlock(&ls->async_lock);
262 19
263 return ret; 20 return ret;
264} 21}
265 22
266static inline int check_drop(struct gdlm_ls *ls) 23static int gdlm_thread(void *data)
267{
268 if (!ls->drop_locks_count)
269 return 0;
270
271 if (time_after(jiffies, ls->drop_time + ls->drop_locks_period * HZ)) {
272 ls->drop_time = jiffies;
273 if (ls->all_locks_count >= ls->drop_locks_count)
274 return 1;
275 }
276 return 0;
277}
278
279static int gdlm_thread(void *data, int blist)
280{ 24{
281 struct gdlm_ls *ls = (struct gdlm_ls *) data; 25 struct gdlm_ls *ls = (struct gdlm_ls *) data;
282 struct gdlm_lock *lp = NULL; 26 struct gdlm_lock *lp = NULL;
283 uint8_t complete, blocking, submit, drop;
284
285 /* Only thread1 is allowed to do blocking callbacks since gfs
286 may wait for a completion callback within a blocking cb. */
287 27
288 while (!kthread_should_stop()) { 28 while (!kthread_should_stop()) {
289 wait_event_interruptible(ls->thread_wait, 29 wait_event_interruptible(ls->thread_wait,
290 !no_work(ls, blist) || kthread_should_stop()); 30 !no_work(ls) || kthread_should_stop());
291
292 complete = blocking = submit = drop = 0;
293 31
294 spin_lock(&ls->async_lock); 32 spin_lock(&ls->async_lock);
295 33
296 if (blist && !list_empty(&ls->blocking)) { 34 if (!list_empty(&ls->submit)) {
297 lp = list_entry(ls->blocking.next, struct gdlm_lock,
298 blist);
299 list_del_init(&lp->blist);
300 blocking = lp->bast_mode;
301 lp->bast_mode = 0;
302 } else if (!list_empty(&ls->complete)) {
303 lp = list_entry(ls->complete.next, struct gdlm_lock,
304 clist);
305 list_del_init(&lp->clist);
306 complete = 1;
307 } else if (!list_empty(&ls->submit)) {
308 lp = list_entry(ls->submit.next, struct gdlm_lock, 35 lp = list_entry(ls->submit.next, struct gdlm_lock,
309 delay_list); 36 delay_list);
310 list_del_init(&lp->delay_list); 37 list_del_init(&lp->delay_list);
311 submit = 1; 38 spin_unlock(&ls->async_lock);
39 gdlm_do_lock(lp);
40 spin_lock(&ls->async_lock);
312 } 41 }
313
314 drop = check_drop(ls);
315 spin_unlock(&ls->async_lock); 42 spin_unlock(&ls->async_lock);
316
317 if (complete)
318 process_complete(lp);
319
320 else if (blocking)
321 process_blocking(lp, blocking);
322
323 else if (submit)
324 gdlm_do_lock(lp);
325
326 if (drop)
327 ls->fscb(ls->sdp, LM_CB_DROPLOCKS, NULL);
328
329 schedule();
330 } 43 }
331 44
332 return 0; 45 return 0;
333} 46}
334 47
335static int gdlm_thread1(void *data)
336{
337 return gdlm_thread(data, 1);
338}
339
340static int gdlm_thread2(void *data)
341{
342 return gdlm_thread(data, 0);
343}
344
345int gdlm_init_threads(struct gdlm_ls *ls) 48int gdlm_init_threads(struct gdlm_ls *ls)
346{ 49{
347 struct task_struct *p; 50 struct task_struct *p;
348 int error; 51 int error;
349 52
350 p = kthread_run(gdlm_thread1, ls, "lock_dlm1"); 53 p = kthread_run(gdlm_thread, ls, "lock_dlm");
351 error = IS_ERR(p);
352 if (error) {
353 log_error("can't start lock_dlm1 thread %d", error);
354 return error;
355 }
356 ls->thread1 = p;
357
358 p = kthread_run(gdlm_thread2, ls, "lock_dlm2");
359 error = IS_ERR(p); 54 error = IS_ERR(p);
360 if (error) { 55 if (error) {
361 log_error("can't start lock_dlm2 thread %d", error); 56 log_error("can't start lock_dlm thread %d", error);
362 kthread_stop(ls->thread1);
363 return error; 57 return error;
364 } 58 }
365 ls->thread2 = p; 59 ls->thread = p;
366 60
367 return 0; 61 return 0;
368} 62}
369 63
370void gdlm_release_threads(struct gdlm_ls *ls) 64void gdlm_release_threads(struct gdlm_ls *ls)
371{ 65{
372 kthread_stop(ls->thread1); 66 kthread_stop(ls->thread);
373 kthread_stop(ls->thread2);
374} 67}
375 68
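
The rewrite above collapses lock_dlm's two kthreads into one that only drains the submit list; completion and blocking callbacks are now delivered directly from DLM ast context instead of being queued for a thread. For illustration, a minimal userspace analogue of that drain-one-entry-with-the-lock-dropped loop, written with pthreads (all names here are hypothetical stand-ins, not kernel APIs):

/* Userspace sketch only; illustrative names, not kernel interfaces. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct item {
	struct item *next;
	int id;
};

static pthread_mutex_t async_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t thread_wait = PTHREAD_COND_INITIALIZER;
static struct item *submit_head;
static int stopping;

/* Like gdlm_thread(): pop one entry, drop the list lock while
 * processing it (the real gdlm_do_lock() may block), then retake it. */
static void *worker(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&async_lock);
	for (;;) {
		struct item *it = submit_head;
		if (!it) {
			if (stopping)
				break;
			pthread_cond_wait(&thread_wait, &async_lock);
			continue;
		}
		submit_head = it->next;
		pthread_mutex_unlock(&async_lock);
		printf("submitting lock request %d\n", it->id);
		free(it);
		pthread_mutex_lock(&async_lock);
	}
	pthread_mutex_unlock(&async_lock);
	return NULL;
}

/* Like queue_submit(): add under the lock, then wake the worker.
 * (The kernel uses a FIFO tail-add; a LIFO push keeps this short.) */
static void submit(int id)
{
	struct item *it = malloc(sizeof(*it));
	if (!it)
		return;
	it->id = id;
	pthread_mutex_lock(&async_lock);
	it->next = submit_head;
	submit_head = it;
	pthread_mutex_unlock(&async_lock);
	pthread_cond_signal(&thread_wait);
}

int main(void)
{
	pthread_t t;
	pthread_create(&t, NULL, worker, NULL);
	submit(1);
	submit(2);
	pthread_mutex_lock(&async_lock);
	stopping = 1;
	pthread_mutex_unlock(&async_lock);
	pthread_cond_signal(&thread_wait);
	pthread_join(t, NULL);
	return 0;
}
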
diff --git a/fs/gfs2/locking/nolock/Makefile b/fs/gfs2/locking/nolock/Makefile
deleted file mode 100644
index 35e9730bc3a8..000000000000
--- a/fs/gfs2/locking/nolock/Makefile
+++ /dev/null
@@ -1,3 +0,0 @@
1obj-$(CONFIG_GFS2_FS_LOCKING_NOLOCK) += lock_nolock.o
2lock_nolock-y := main.o
3
diff --git a/fs/gfs2/locking/nolock/main.c b/fs/gfs2/locking/nolock/main.c
deleted file mode 100644
index 284a5ece8d94..000000000000
--- a/fs/gfs2/locking/nolock/main.c
+++ /dev/null
@@ -1,238 +0,0 @@
1/*
2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3 * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
4 *
5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions
7 * of the GNU General Public License version 2.
8 */
9
10#include <linux/module.h>
11#include <linux/slab.h>
12#include <linux/init.h>
13#include <linux/types.h>
14#include <linux/fs.h>
15#include <linux/lm_interface.h>
16
17struct nolock_lockspace {
18 unsigned int nl_lvb_size;
19};
20
21static const struct lm_lockops nolock_ops;
22
23static int nolock_mount(char *table_name, char *host_data,
24 lm_callback_t cb, void *cb_data,
25 unsigned int min_lvb_size, int flags,
26 struct lm_lockstruct *lockstruct,
27 struct kobject *fskobj)
28{
29 char *c;
30 unsigned int jid;
31 struct nolock_lockspace *nl;
32
33 c = strstr(host_data, "jid=");
34 if (!c)
35 jid = 0;
36 else {
37 c += 4;
38 sscanf(c, "%u", &jid);
39 }
40
41 nl = kzalloc(sizeof(struct nolock_lockspace), GFP_KERNEL);
42 if (!nl)
43 return -ENOMEM;
44
45 nl->nl_lvb_size = min_lvb_size;
46
47 lockstruct->ls_jid = jid;
48 lockstruct->ls_first = 1;
49 lockstruct->ls_lvb_size = min_lvb_size;
50 lockstruct->ls_lockspace = nl;
51 lockstruct->ls_ops = &nolock_ops;
52 lockstruct->ls_flags = LM_LSFLAG_LOCAL;
53
54 return 0;
55}
56
57static void nolock_others_may_mount(void *lockspace)
58{
59}
60
61static void nolock_unmount(void *lockspace)
62{
63 struct nolock_lockspace *nl = lockspace;
64 kfree(nl);
65}
66
67static void nolock_withdraw(void *lockspace)
68{
69}
70
71/**
72 * nolock_get_lock - get a lm_lock_t given a description of the lock
73 * @lockspace: the lockspace the lock lives in
74 * @name: the name of the lock
75 * @lockp: return the lm_lock_t here
76 *
77 * Returns: 0 on success, -EXXX on failure
78 */
79
80static int nolock_get_lock(void *lockspace, struct lm_lockname *name,
81 void **lockp)
82{
83 *lockp = lockspace;
84 return 0;
85}
86
87/**
88 * nolock_put_lock - get rid of a lock structure
89 * @lock: the lock to throw away
90 *
91 */
92
93static void nolock_put_lock(void *lock)
94{
95}
96
97/**
98 * nolock_lock - acquire a lock
99 * @lock: the lock to manipulate
100 * @cur_state: the current state
101 * @req_state: the requested state
102 * @flags: modifier flags
103 *
104 * Returns: A bitmap of LM_OUT_*
105 */
106
107static unsigned int nolock_lock(void *lock, unsigned int cur_state,
108 unsigned int req_state, unsigned int flags)
109{
110 return req_state | LM_OUT_CACHEABLE;
111}
112
113/**
114 * nolock_unlock - unlock a lock
115 * @lock: the lock to manipulate
116 * @cur_state: the current state
117 *
118 * Returns: 0
119 */
120
121static unsigned int nolock_unlock(void *lock, unsigned int cur_state)
122{
123 return 0;
124}
125
126static void nolock_cancel(void *lock)
127{
128}
129
130/**
131 * nolock_hold_lvb - hold on to a lock value block
132 * @lock: the lock the LVB is associated with
133 * @lvbp: return the lm_lvb_t here
134 *
135 * Returns: 0 on success, -EXXX on failure
136 */
137
138static int nolock_hold_lvb(void *lock, char **lvbp)
139{
140 struct nolock_lockspace *nl = lock;
141 int error = 0;
142
143 *lvbp = kzalloc(nl->nl_lvb_size, GFP_NOFS);
144 if (!*lvbp)
145 error = -ENOMEM;
146
147 return error;
148}
149
150/**
151 * nolock_unhold_lvb - release a LVB
152 * @lock: the lock the LVB is associated with
153 * @lvb: the lock value block
154 *
155 */
156
157static void nolock_unhold_lvb(void *lock, char *lvb)
158{
159 kfree(lvb);
160}
161
162static int nolock_plock_get(void *lockspace, struct lm_lockname *name,
163 struct file *file, struct file_lock *fl)
164{
165 posix_test_lock(file, fl);
166
167 return 0;
168}
169
170static int nolock_plock(void *lockspace, struct lm_lockname *name,
171 struct file *file, int cmd, struct file_lock *fl)
172{
173 int error;
174 error = posix_lock_file_wait(file, fl);
175 return error;
176}
177
178static int nolock_punlock(void *lockspace, struct lm_lockname *name,
179 struct file *file, struct file_lock *fl)
180{
181 int error;
182 error = posix_lock_file_wait(file, fl);
183 return error;
184}
185
186static void nolock_recovery_done(void *lockspace, unsigned int jid,
187 unsigned int message)
188{
189}
190
191static const struct lm_lockops nolock_ops = {
192 .lm_proto_name = "lock_nolock",
193 .lm_mount = nolock_mount,
194 .lm_others_may_mount = nolock_others_may_mount,
195 .lm_unmount = nolock_unmount,
196 .lm_withdraw = nolock_withdraw,
197 .lm_get_lock = nolock_get_lock,
198 .lm_put_lock = nolock_put_lock,
199 .lm_lock = nolock_lock,
200 .lm_unlock = nolock_unlock,
201 .lm_cancel = nolock_cancel,
202 .lm_hold_lvb = nolock_hold_lvb,
203 .lm_unhold_lvb = nolock_unhold_lvb,
204 .lm_plock_get = nolock_plock_get,
205 .lm_plock = nolock_plock,
206 .lm_punlock = nolock_punlock,
207 .lm_recovery_done = nolock_recovery_done,
208 .lm_owner = THIS_MODULE,
209};
210
211static int __init init_nolock(void)
212{
213 int error;
214
215 error = gfs2_register_lockproto(&nolock_ops);
216 if (error) {
217 printk(KERN_WARNING
218 "lock_nolock: can't register protocol: %d\n", error);
219 return error;
220 }
221
222 printk(KERN_INFO
223 "Lock_Nolock (built %s %s) installed\n", __DATE__, __TIME__);
224 return 0;
225}
226
227static void __exit exit_nolock(void)
228{
229 gfs2_unregister_lockproto(&nolock_ops);
230}
231
232module_init(init_nolock);
233module_exit(exit_nolock);
234
235MODULE_DESCRIPTION("GFS Nolock Locking Module");
236MODULE_AUTHOR("Red Hat, Inc.");
237MODULE_LICENSE("GPL");
238
diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
index 548264b1836d..6c6af9f5e3ab 100644
--- a/fs/gfs2/log.c
+++ b/fs/gfs2/log.c
@@ -87,6 +87,8 @@ void gfs2_remove_from_ail(struct gfs2_bufdata *bd)
87 */ 87 */
88 88
89static void gfs2_ail1_start_one(struct gfs2_sbd *sdp, struct gfs2_ail *ai) 89static void gfs2_ail1_start_one(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
90__releases(&sdp->sd_log_lock)
91__acquires(&sdp->sd_log_lock)
90{ 92{
91 struct gfs2_bufdata *bd, *s; 93 struct gfs2_bufdata *bd, *s;
92 struct buffer_head *bh; 94 struct buffer_head *bh;
diff --git a/fs/gfs2/log.h b/fs/gfs2/log.h
index 771152816508..7c64510ccfd2 100644
--- a/fs/gfs2/log.h
+++ b/fs/gfs2/log.h
@@ -21,6 +21,7 @@
21 */ 21 */
22 22
23static inline void gfs2_log_lock(struct gfs2_sbd *sdp) 23static inline void gfs2_log_lock(struct gfs2_sbd *sdp)
24__acquires(&sdp->sd_log_lock)
24{ 25{
25 spin_lock(&sdp->sd_log_lock); 26 spin_lock(&sdp->sd_log_lock);
26} 27}
@@ -32,6 +33,7 @@ static inline void gfs2_log_lock(struct gfs2_sbd *sdp)
32 */ 33 */
33 34
34static inline void gfs2_log_unlock(struct gfs2_sbd *sdp) 35static inline void gfs2_log_unlock(struct gfs2_sbd *sdp)
36__releases(&sdp->sd_log_lock)
35{ 37{
36 spin_unlock(&sdp->sd_log_lock); 38 spin_unlock(&sdp->sd_log_lock);
37} 39}
diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c
index 053e2ebbbd50..bcc668d0fadd 100644
--- a/fs/gfs2/main.c
+++ b/fs/gfs2/main.c
@@ -40,8 +40,6 @@ static void gfs2_init_glock_once(struct kmem_cache *cachep, void *foo)
40 INIT_HLIST_NODE(&gl->gl_list); 40 INIT_HLIST_NODE(&gl->gl_list);
41 spin_lock_init(&gl->gl_spin); 41 spin_lock_init(&gl->gl_spin);
42 INIT_LIST_HEAD(&gl->gl_holders); 42 INIT_LIST_HEAD(&gl->gl_holders);
43 INIT_LIST_HEAD(&gl->gl_waiters1);
44 INIT_LIST_HEAD(&gl->gl_waiters3);
45 gl->gl_lvb = NULL; 43 gl->gl_lvb = NULL;
46 atomic_set(&gl->gl_lvb_count, 0); 44 atomic_set(&gl->gl_lvb_count, 0);
47 INIT_LIST_HEAD(&gl->gl_reclaim); 45 INIT_LIST_HEAD(&gl->gl_reclaim);
diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c
index 78d75f892f82..09853620c951 100644
--- a/fs/gfs2/meta_io.c
+++ b/fs/gfs2/meta_io.c
@@ -129,7 +129,7 @@ void gfs2_meta_sync(struct gfs2_glock *gl)
129} 129}
130 130
131/** 131/**
132 * getbuf - Get a buffer with a given address space 132 * gfs2_getbuf - Get a buffer with a given address space
133 * @gl: the glock 133 * @gl: the glock
134 * @blkno: the block number (filesystem scope) 134 * @blkno: the block number (filesystem scope)
135 * @create: 1 if the buffer should be created 135 * @create: 1 if the buffer should be created
@@ -137,7 +137,7 @@ void gfs2_meta_sync(struct gfs2_glock *gl)
137 * Returns: the buffer 137 * Returns: the buffer
138 */ 138 */
139 139
140static struct buffer_head *getbuf(struct gfs2_glock *gl, u64 blkno, int create) 140struct buffer_head *gfs2_getbuf(struct gfs2_glock *gl, u64 blkno, int create)
141{ 141{
142 struct address_space *mapping = gl->gl_aspace->i_mapping; 142 struct address_space *mapping = gl->gl_aspace->i_mapping;
143 struct gfs2_sbd *sdp = gl->gl_sbd; 143 struct gfs2_sbd *sdp = gl->gl_sbd;
@@ -205,7 +205,7 @@ static void meta_prep_new(struct buffer_head *bh)
205struct buffer_head *gfs2_meta_new(struct gfs2_glock *gl, u64 blkno) 205struct buffer_head *gfs2_meta_new(struct gfs2_glock *gl, u64 blkno)
206{ 206{
207 struct buffer_head *bh; 207 struct buffer_head *bh;
208 bh = getbuf(gl, blkno, CREATE); 208 bh = gfs2_getbuf(gl, blkno, CREATE);
209 meta_prep_new(bh); 209 meta_prep_new(bh);
210 return bh; 210 return bh;
211} 211}
@@ -223,7 +223,7 @@ struct buffer_head *gfs2_meta_new(struct gfs2_glock *gl, u64 blkno)
223int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags, 223int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags,
224 struct buffer_head **bhp) 224 struct buffer_head **bhp)
225{ 225{
226 *bhp = getbuf(gl, blkno, CREATE); 226 *bhp = gfs2_getbuf(gl, blkno, CREATE);
227 if (!buffer_uptodate(*bhp)) { 227 if (!buffer_uptodate(*bhp)) {
228 ll_rw_block(READ_META, 1, bhp); 228 ll_rw_block(READ_META, 1, bhp);
229 if (flags & DIO_WAIT) { 229 if (flags & DIO_WAIT) {
@@ -346,7 +346,7 @@ void gfs2_meta_wipe(struct gfs2_inode *ip, u64 bstart, u32 blen)
346 struct buffer_head *bh; 346 struct buffer_head *bh;
347 347
348 while (blen) { 348 while (blen) {
349 bh = getbuf(ip->i_gl, bstart, NO_CREATE); 349 bh = gfs2_getbuf(ip->i_gl, bstart, NO_CREATE);
350 if (bh) { 350 if (bh) {
351 lock_buffer(bh); 351 lock_buffer(bh);
352 gfs2_log_lock(sdp); 352 gfs2_log_lock(sdp);
@@ -421,7 +421,7 @@ struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen)
421 if (extlen > max_ra) 421 if (extlen > max_ra)
422 extlen = max_ra; 422 extlen = max_ra;
423 423
424 first_bh = getbuf(gl, dblock, CREATE); 424 first_bh = gfs2_getbuf(gl, dblock, CREATE);
425 425
426 if (buffer_uptodate(first_bh)) 426 if (buffer_uptodate(first_bh))
427 goto out; 427 goto out;
@@ -432,7 +432,7 @@ struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen)
432 extlen--; 432 extlen--;
433 433
434 while (extlen) { 434 while (extlen) {
435 bh = getbuf(gl, dblock, CREATE); 435 bh = gfs2_getbuf(gl, dblock, CREATE);
436 436
437 if (!buffer_uptodate(bh) && !buffer_locked(bh)) 437 if (!buffer_uptodate(bh) && !buffer_locked(bh))
438 ll_rw_block(READA, 1, &bh); 438 ll_rw_block(READA, 1, &bh);
diff --git a/fs/gfs2/meta_io.h b/fs/gfs2/meta_io.h
index 73e3b1c76fe1..b1a5f3674d43 100644
--- a/fs/gfs2/meta_io.h
+++ b/fs/gfs2/meta_io.h
@@ -47,6 +47,7 @@ struct buffer_head *gfs2_meta_new(struct gfs2_glock *gl, u64 blkno);
47int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, 47int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno,
48 int flags, struct buffer_head **bhp); 48 int flags, struct buffer_head **bhp);
49int gfs2_meta_wait(struct gfs2_sbd *sdp, struct buffer_head *bh); 49int gfs2_meta_wait(struct gfs2_sbd *sdp, struct buffer_head *bh);
50struct buffer_head *gfs2_getbuf(struct gfs2_glock *gl, u64 blkno, int create);
50 51
51void gfs2_attach_bufdata(struct gfs2_glock *gl, struct buffer_head *bh, 52void gfs2_attach_bufdata(struct gfs2_glock *gl, struct buffer_head *bh,
52 int meta); 53 int meta);
diff --git a/fs/gfs2/ops_address.c b/fs/gfs2/ops_address.c
index f55394e57cb2..e64a1b04117a 100644
--- a/fs/gfs2/ops_address.c
+++ b/fs/gfs2/ops_address.c
@@ -499,34 +499,34 @@ static int __gfs2_readpage(void *file, struct page *page)
499 * @file: The file to read 499 * @file: The file to read
500 * @page: The page of the file 500 * @page: The page of the file
501 * 501 *
502 * This deals with the locking required. We use a trylock in order to 502 * This deals with the locking required. We have to unlock and
503 * avoid the page lock / glock ordering problems returning AOP_TRUNCATED_PAGE 503 * relock the page in order to get the locking in the right
504 * in the event that we are unable to get the lock. 504 * order.
505 */ 505 */
506 506
507static int gfs2_readpage(struct file *file, struct page *page) 507static int gfs2_readpage(struct file *file, struct page *page)
508{ 508{
509 struct gfs2_inode *ip = GFS2_I(page->mapping->host); 509 struct address_space *mapping = page->mapping;
510 struct gfs2_holder *gh; 510 struct gfs2_inode *ip = GFS2_I(mapping->host);
511 struct gfs2_holder gh;
511 int error; 512 int error;
512 513
513 gh = gfs2_glock_is_locked_by_me(ip->i_gl); 514 unlock_page(page);
514 if (!gh) { 515 gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME, &gh);
515 gh = kmalloc(sizeof(struct gfs2_holder), GFP_NOFS); 516 error = gfs2_glock_nq_atime(&gh);
516 if (!gh) 517 if (unlikely(error))
517 return -ENOBUFS; 518 goto out;
518 gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME, gh); 519 error = AOP_TRUNCATED_PAGE;
520 lock_page(page);
521 if (page->mapping == mapping && !PageUptodate(page))
522 error = __gfs2_readpage(file, page);
523 else
519 unlock_page(page); 524 unlock_page(page);
520 error = gfs2_glock_nq_atime(gh); 525 gfs2_glock_dq(&gh);
521 if (likely(error != 0))
522 goto out;
523 return AOP_TRUNCATED_PAGE;
524 }
525 error = __gfs2_readpage(file, page);
526 gfs2_glock_dq(gh);
527out: 526out:
528 gfs2_holder_uninit(gh); 527 gfs2_holder_uninit(&gh);
529 kfree(gh); 528 if (error && error != AOP_TRUNCATED_PAGE)
529 lock_page(page);
530 return error; 530 return error;
531} 531}
532 532
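
The new gfs2_readpage() gets the lock ordering right (glock before page lock) by dropping the page lock, taking the glock, relocking the page, and then revalidating: if page->mapping changed or the page became uptodate during the unlocked window, it returns AOP_TRUNCATED_PAGE so the VFS retries from scratch. A compressed userspace sketch of that revalidate-after-relock shape, with mutexes standing in for the page lock and glock and error handling trimmed (all types hypothetical):

/* Userspace sketch of the lock-ordering pattern, not kernel code. */
#include <pthread.h>
#include <stdbool.h>

struct vpage {
	pthread_mutex_t lock;	/* plays the page lock */
	void *mapping;		/* owner; cleared by "truncation" */
	bool uptodate;
};

static pthread_mutex_t glock = PTHREAD_MUTEX_INITIALIZER;

#define RETRY 1			/* plays the AOP_TRUNCATED_PAGE role */

/* Entered with page->lock held, as ->readpage() is. */
static int readpage(struct vpage *page, void *mapping)
{
	int ret = 0;

	pthread_mutex_unlock(&page->lock);	/* unlock_page(page) */
	pthread_mutex_lock(&glock);		/* outer lock first */
	pthread_mutex_lock(&page->lock);	/* now relock the page */

	if (page->mapping == mapping && !page->uptodate)
		page->uptodate = true;	/* __gfs2_readpage() runs here */
	else
		ret = RETRY;	/* raced with truncation: caller retries */

	pthread_mutex_unlock(&page->lock);
	pthread_mutex_unlock(&glock);		/* gfs2_glock_dq() */
	return ret;
}
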
diff --git a/fs/gfs2/ops_file.c b/fs/gfs2/ops_file.c
index 24dd59450088..e9a366d4411c 100644
--- a/fs/gfs2/ops_file.c
+++ b/fs/gfs2/ops_file.c
@@ -15,6 +15,7 @@
15#include <linux/uio.h> 15#include <linux/uio.h>
16#include <linux/blkdev.h> 16#include <linux/blkdev.h>
17#include <linux/mm.h> 17#include <linux/mm.h>
18#include <linux/mount.h>
18#include <linux/fs.h> 19#include <linux/fs.h>
19#include <linux/gfs2_ondisk.h> 20#include <linux/gfs2_ondisk.h>
20#include <linux/ext2_fs.h> 21#include <linux/ext2_fs.h>
@@ -133,7 +134,6 @@ static const u32 fsflags_to_gfs2[32] = {
133 [7] = GFS2_DIF_NOATIME, 134 [7] = GFS2_DIF_NOATIME,
134 [12] = GFS2_DIF_EXHASH, 135 [12] = GFS2_DIF_EXHASH,
135 [14] = GFS2_DIF_INHERIT_JDATA, 136 [14] = GFS2_DIF_INHERIT_JDATA,
136 [20] = GFS2_DIF_INHERIT_DIRECTIO,
137}; 137};
138 138
139static const u32 gfs2_to_fsflags[32] = { 139static const u32 gfs2_to_fsflags[32] = {
@@ -142,7 +142,6 @@ static const u32 gfs2_to_fsflags[32] = {
142 [gfs2fl_AppendOnly] = FS_APPEND_FL, 142 [gfs2fl_AppendOnly] = FS_APPEND_FL,
143 [gfs2fl_NoAtime] = FS_NOATIME_FL, 143 [gfs2fl_NoAtime] = FS_NOATIME_FL,
144 [gfs2fl_ExHash] = FS_INDEX_FL, 144 [gfs2fl_ExHash] = FS_INDEX_FL,
145 [gfs2fl_InheritDirectio] = FS_DIRECTIO_FL,
146 [gfs2fl_InheritJdata] = FS_JOURNAL_DATA_FL, 145 [gfs2fl_InheritJdata] = FS_JOURNAL_DATA_FL,
147}; 146};
148 147
@@ -160,12 +159,8 @@ static int gfs2_get_flags(struct file *filp, u32 __user *ptr)
160 return error; 159 return error;
161 160
162 fsflags = fsflags_cvt(gfs2_to_fsflags, ip->i_di.di_flags); 161 fsflags = fsflags_cvt(gfs2_to_fsflags, ip->i_di.di_flags);
163 if (!S_ISDIR(inode->i_mode)) { 162 if (!S_ISDIR(inode->i_mode) && ip->i_di.di_flags & GFS2_DIF_JDATA)
164 if (ip->i_di.di_flags & GFS2_DIF_JDATA) 163 fsflags |= FS_JOURNAL_DATA_FL;
165 fsflags |= FS_JOURNAL_DATA_FL;
166 if (ip->i_di.di_flags & GFS2_DIF_DIRECTIO)
167 fsflags |= FS_DIRECTIO_FL;
168 }
169 if (put_user(fsflags, ptr)) 164 if (put_user(fsflags, ptr))
170 error = -EFAULT; 165 error = -EFAULT;
171 166
@@ -194,13 +189,11 @@ void gfs2_set_inode_flags(struct inode *inode)
194 189
195/* Flags that can be set by user space */ 190/* Flags that can be set by user space */
196#define GFS2_FLAGS_USER_SET (GFS2_DIF_JDATA| \ 191#define GFS2_FLAGS_USER_SET (GFS2_DIF_JDATA| \
197 GFS2_DIF_DIRECTIO| \
198 GFS2_DIF_IMMUTABLE| \ 192 GFS2_DIF_IMMUTABLE| \
199 GFS2_DIF_APPENDONLY| \ 193 GFS2_DIF_APPENDONLY| \
200 GFS2_DIF_NOATIME| \ 194 GFS2_DIF_NOATIME| \
201 GFS2_DIF_SYNC| \ 195 GFS2_DIF_SYNC| \
202 GFS2_DIF_SYSTEM| \ 196 GFS2_DIF_SYSTEM| \
203 GFS2_DIF_INHERIT_DIRECTIO| \
204 GFS2_DIF_INHERIT_JDATA) 197 GFS2_DIF_INHERIT_JDATA)
205 198
206/** 199/**
@@ -220,10 +213,14 @@ static int do_gfs2_set_flags(struct file *filp, u32 reqflags, u32 mask)
220 int error; 213 int error;
221 u32 new_flags, flags; 214 u32 new_flags, flags;
222 215
223 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh); 216 error = mnt_want_write(filp->f_path.mnt);
224 if (error) 217 if (error)
225 return error; 218 return error;
226 219
220 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
221 if (error)
222 goto out_drop_write;
223
227 flags = ip->i_di.di_flags; 224 flags = ip->i_di.di_flags;
228 new_flags = (flags & ~mask) | (reqflags & mask); 225 new_flags = (flags & ~mask) | (reqflags & mask);
229 if ((new_flags ^ flags) == 0) 226 if ((new_flags ^ flags) == 0)
@@ -242,7 +239,7 @@ static int do_gfs2_set_flags(struct file *filp, u32 reqflags, u32 mask)
242 !capable(CAP_LINUX_IMMUTABLE)) 239 !capable(CAP_LINUX_IMMUTABLE))
243 goto out; 240 goto out;
244 if (!IS_IMMUTABLE(inode)) { 241 if (!IS_IMMUTABLE(inode)) {
245 error = permission(inode, MAY_WRITE, NULL); 242 error = gfs2_permission(inode, MAY_WRITE);
246 if (error) 243 if (error)
247 goto out; 244 goto out;
248 } 245 }
@@ -272,6 +269,8 @@ out_trans_end:
272 gfs2_trans_end(sdp); 269 gfs2_trans_end(sdp);
273out: 270out:
274 gfs2_glock_dq_uninit(&gh); 271 gfs2_glock_dq_uninit(&gh);
272out_drop_write:
273 mnt_drop_write(filp->f_path.mnt);
275 return error; 274 return error;
276} 275}
277 276
@@ -285,8 +284,6 @@ static int gfs2_set_flags(struct file *filp, u32 __user *ptr)
285 if (!S_ISDIR(inode->i_mode)) { 284 if (!S_ISDIR(inode->i_mode)) {
286 if (gfsflags & GFS2_DIF_INHERIT_JDATA) 285 if (gfsflags & GFS2_DIF_INHERIT_JDATA)
287 gfsflags ^= (GFS2_DIF_JDATA | GFS2_DIF_INHERIT_JDATA); 286 gfsflags ^= (GFS2_DIF_JDATA | GFS2_DIF_INHERIT_JDATA);
288 if (gfsflags & GFS2_DIF_INHERIT_DIRECTIO)
289 gfsflags ^= (GFS2_DIF_DIRECTIO | GFS2_DIF_INHERIT_DIRECTIO);
290 return do_gfs2_set_flags(filp, gfsflags, ~0); 287 return do_gfs2_set_flags(filp, gfsflags, ~0);
291 } 288 }
292 return do_gfs2_set_flags(filp, gfsflags, ~GFS2_DIF_JDATA); 289 return do_gfs2_set_flags(filp, gfsflags, ~GFS2_DIF_JDATA);
@@ -487,11 +484,6 @@ static int gfs2_open(struct inode *inode, struct file *file)
487 goto fail_gunlock; 484 goto fail_gunlock;
488 } 485 }
489 486
490 /* Listen to the Direct I/O flag */
491
492 if (ip->i_di.di_flags & GFS2_DIF_DIRECTIO)
493 file->f_flags |= O_DIRECT;
494
495 gfs2_glock_dq_uninit(&i_gh); 487 gfs2_glock_dq_uninit(&i_gh);
496 } 488 }
497 489
@@ -669,8 +661,7 @@ static int do_flock(struct file *file, int cmd, struct file_lock *fl)
669 int error = 0; 661 int error = 0;
670 662
671 state = (fl->fl_type == F_WRLCK) ? LM_ST_EXCLUSIVE : LM_ST_SHARED; 663 state = (fl->fl_type == F_WRLCK) ? LM_ST_EXCLUSIVE : LM_ST_SHARED;
672 flags = (IS_SETLKW(cmd) ? 0 : LM_FLAG_TRY) | GL_EXACT | GL_NOCACHE 664 flags = (IS_SETLKW(cmd) ? 0 : LM_FLAG_TRY) | GL_EXACT | GL_NOCACHE;
673 | GL_FLOCK;
674 665
675 mutex_lock(&fp->f_fl_mutex); 666 mutex_lock(&fp->f_fl_mutex);
676 667
@@ -683,9 +674,8 @@ static int do_flock(struct file *file, int cmd, struct file_lock *fl)
683 gfs2_glock_dq_wait(fl_gh); 674 gfs2_glock_dq_wait(fl_gh);
684 gfs2_holder_reinit(state, flags, fl_gh); 675 gfs2_holder_reinit(state, flags, fl_gh);
685 } else { 676 } else {
686 error = gfs2_glock_get(GFS2_SB(&ip->i_inode), 677 error = gfs2_glock_get(GFS2_SB(&ip->i_inode), ip->i_no_addr,
687 ip->i_no_addr, &gfs2_flock_glops, 678 &gfs2_flock_glops, CREATE, &gl);
688 CREATE, &gl);
689 if (error) 679 if (error)
690 goto out; 680 goto out;
691 gfs2_holder_init(gl, state, flags, fl_gh); 681 gfs2_holder_init(gl, state, flags, fl_gh);
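
Besides dropping the directio flags, do_gfs2_set_flags() above now brackets its work with mnt_want_write()/mnt_drop_write(), taken before the glock and released after it in strict reverse order via the usual goto unwind. A small self-contained model of that idiom (the four helpers are hypothetical stand-ins for the calls in the hunk):

/* Sketch of the goto-based unwind order; helper names are made up. */
#include <stdio.h>

static int  want_write(void)  { puts("want_write"); return 0; }
static void drop_write(void)  { puts("drop_write"); }
static int  lock_ex(void)     { puts("glock nq");   return 0; }
static void unlock_ex(void)   { puts("glock dq"); }

static int set_flags(unsigned int reqflags)
{
	int error;

	error = want_write();		/* outermost resource first */
	if (error)
		return error;

	error = lock_ex();
	if (error)
		goto out_drop_write;

	/* ... modify flags under the exclusive lock, using reqflags ... */
	(void)reqflags;
	error = 0;

	unlock_ex();
out_drop_write:
	drop_write();			/* always pairs with want_write() */
	return error;
}

int main(void)
{
	return set_flags(0);
}
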
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index b2028c82e8d1..b4d1d6490633 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -64,7 +64,6 @@ static struct gfs2_sbd *init_sbd(struct super_block *sb)
64 mutex_init(&sdp->sd_rindex_mutex); 64 mutex_init(&sdp->sd_rindex_mutex);
65 INIT_LIST_HEAD(&sdp->sd_rindex_list); 65 INIT_LIST_HEAD(&sdp->sd_rindex_list);
66 INIT_LIST_HEAD(&sdp->sd_rindex_mru_list); 66 INIT_LIST_HEAD(&sdp->sd_rindex_mru_list);
67 INIT_LIST_HEAD(&sdp->sd_rindex_recent_list);
68 67
69 INIT_LIST_HEAD(&sdp->sd_jindex_list); 68 INIT_LIST_HEAD(&sdp->sd_jindex_list);
70 spin_lock_init(&sdp->sd_jindex_spin); 69 spin_lock_init(&sdp->sd_jindex_spin);
@@ -364,6 +363,8 @@ static int map_journal_extents(struct gfs2_sbd *sdp)
364 363
365static void gfs2_lm_others_may_mount(struct gfs2_sbd *sdp) 364static void gfs2_lm_others_may_mount(struct gfs2_sbd *sdp)
366{ 365{
366 if (!sdp->sd_lockstruct.ls_ops->lm_others_may_mount)
367 return;
367 if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) 368 if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
368 sdp->sd_lockstruct.ls_ops->lm_others_may_mount( 369 sdp->sd_lockstruct.ls_ops->lm_others_may_mount(
369 sdp->sd_lockstruct.ls_lockspace); 370 sdp->sd_lockstruct.ls_lockspace);
@@ -741,8 +742,7 @@ static int gfs2_lm_mount(struct gfs2_sbd *sdp, int silent)
741 goto out; 742 goto out;
742 } 743 }
743 744
744 if (gfs2_assert_warn(sdp, sdp->sd_lockstruct.ls_lockspace) || 745 if (gfs2_assert_warn(sdp, sdp->sd_lockstruct.ls_ops) ||
745 gfs2_assert_warn(sdp, sdp->sd_lockstruct.ls_ops) ||
746 gfs2_assert_warn(sdp, sdp->sd_lockstruct.ls_lvb_size >= 746 gfs2_assert_warn(sdp, sdp->sd_lockstruct.ls_lvb_size >=
747 GFS2_MIN_LVB_SIZE)) { 747 GFS2_MIN_LVB_SIZE)) {
748 gfs2_unmount_lockproto(&sdp->sd_lockstruct); 748 gfs2_unmount_lockproto(&sdp->sd_lockstruct);
@@ -873,7 +873,7 @@ fail_sb:
873fail_locking: 873fail_locking:
874 init_locking(sdp, &mount_gh, UNDO); 874 init_locking(sdp, &mount_gh, UNDO);
875fail_lm: 875fail_lm:
876 gfs2_gl_hash_clear(sdp, WAIT); 876 gfs2_gl_hash_clear(sdp);
877 gfs2_lm_unmount(sdp); 877 gfs2_lm_unmount(sdp);
878 while (invalidate_inodes(sb)) 878 while (invalidate_inodes(sb))
879 yield(); 879 yield();
diff --git a/fs/gfs2/ops_inode.c b/fs/gfs2/ops_inode.c
index 2686ad4c0029..1e252dfc5294 100644
--- a/fs/gfs2/ops_inode.c
+++ b/fs/gfs2/ops_inode.c
@@ -163,7 +163,7 @@ static int gfs2_link(struct dentry *old_dentry, struct inode *dir,
163 if (error) 163 if (error)
164 goto out; 164 goto out;
165 165
166 error = permission(dir, MAY_WRITE | MAY_EXEC, NULL); 166 error = gfs2_permission(dir, MAY_WRITE | MAY_EXEC);
167 if (error) 167 if (error)
168 goto out_gunlock; 168 goto out_gunlock;
169 169
@@ -669,7 +669,7 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
669 } 669 }
670 } 670 }
671 } else { 671 } else {
672 error = permission(ndir, MAY_WRITE | MAY_EXEC, NULL); 672 error = gfs2_permission(ndir, MAY_WRITE | MAY_EXEC);
673 if (error) 673 if (error)
674 goto out_gunlock; 674 goto out_gunlock;
675 675
@@ -704,7 +704,7 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
704 /* Check out the dir to be renamed */ 704 /* Check out the dir to be renamed */
705 705
706 if (dir_rename) { 706 if (dir_rename) {
707 error = permission(odentry->d_inode, MAY_WRITE, NULL); 707 error = gfs2_permission(odentry->d_inode, MAY_WRITE);
708 if (error) 708 if (error)
709 goto out_gunlock; 709 goto out_gunlock;
710 } 710 }
@@ -891,7 +891,7 @@ static void *gfs2_follow_link(struct dentry *dentry, struct nameidata *nd)
891 * Returns: errno 891 * Returns: errno
892 */ 892 */
893 893
894static int gfs2_permission(struct inode *inode, int mask, struct nameidata *nd) 894int gfs2_permission(struct inode *inode, int mask)
895{ 895{
896 struct gfs2_inode *ip = GFS2_I(inode); 896 struct gfs2_inode *ip = GFS2_I(inode);
897 struct gfs2_holder i_gh; 897 struct gfs2_holder i_gh;
@@ -905,13 +905,22 @@ static int gfs2_permission(struct inode *inode, int mask, struct nameidata *nd)
905 unlock = 1; 905 unlock = 1;
906 } 906 }
907 907
908 error = generic_permission(inode, mask, gfs2_check_acl); 908 if ((mask & MAY_WRITE) && IS_IMMUTABLE(inode))
909 error = -EACCES;
910 else
911 error = generic_permission(inode, mask, gfs2_check_acl);
909 if (unlock) 912 if (unlock)
910 gfs2_glock_dq_uninit(&i_gh); 913 gfs2_glock_dq_uninit(&i_gh);
911 914
912 return error; 915 return error;
913} 916}
914 917
918static int gfs2_iop_permission(struct inode *inode, int mask,
919 struct nameidata *nd)
920{
921 return gfs2_permission(inode, mask);
922}
923
915static int setattr_size(struct inode *inode, struct iattr *attr) 924static int setattr_size(struct inode *inode, struct iattr *attr)
916{ 925{
917 struct gfs2_inode *ip = GFS2_I(inode); 926 struct gfs2_inode *ip = GFS2_I(inode);
@@ -1141,7 +1150,7 @@ static int gfs2_removexattr(struct dentry *dentry, const char *name)
1141} 1150}
1142 1151
1143const struct inode_operations gfs2_file_iops = { 1152const struct inode_operations gfs2_file_iops = {
1144 .permission = gfs2_permission, 1153 .permission = gfs2_iop_permission,
1145 .setattr = gfs2_setattr, 1154 .setattr = gfs2_setattr,
1146 .getattr = gfs2_getattr, 1155 .getattr = gfs2_getattr,
1147 .setxattr = gfs2_setxattr, 1156 .setxattr = gfs2_setxattr,
@@ -1160,7 +1169,7 @@ const struct inode_operations gfs2_dir_iops = {
1160 .rmdir = gfs2_rmdir, 1169 .rmdir = gfs2_rmdir,
1161 .mknod = gfs2_mknod, 1170 .mknod = gfs2_mknod,
1162 .rename = gfs2_rename, 1171 .rename = gfs2_rename,
1163 .permission = gfs2_permission, 1172 .permission = gfs2_iop_permission,
1164 .setattr = gfs2_setattr, 1173 .setattr = gfs2_setattr,
1165 .getattr = gfs2_getattr, 1174 .getattr = gfs2_getattr,
1166 .setxattr = gfs2_setxattr, 1175 .setxattr = gfs2_setxattr,
@@ -1172,7 +1181,7 @@ const struct inode_operations gfs2_dir_iops = {
1172const struct inode_operations gfs2_symlink_iops = { 1181const struct inode_operations gfs2_symlink_iops = {
1173 .readlink = gfs2_readlink, 1182 .readlink = gfs2_readlink,
1174 .follow_link = gfs2_follow_link, 1183 .follow_link = gfs2_follow_link,
1175 .permission = gfs2_permission, 1184 .permission = gfs2_iop_permission,
1176 .setattr = gfs2_setattr, 1185 .setattr = gfs2_setattr,
1177 .getattr = gfs2_getattr, 1186 .getattr = gfs2_getattr,
1178 .setxattr = gfs2_setxattr, 1187 .setxattr = gfs2_setxattr,
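
With the permission() calls gone, the diff splits the old inode operation into two layers: gfs2_permission(inode, mask), callable internally from gfs2_link(), gfs2_rename() and friends, and a thin gfs2_iop_permission() adapter that matches the ->permission() table signature and discards the unused nameidata. The adapter pattern in miniature (all types here are simplified stand-ins):

/* Sketch of the wrapper split; simplified, non-kernel types. */
#include <errno.h>

struct inode_s { unsigned int mode; };
struct nameidata_s;		/* opaque in this sketch */

/* Core check, usable internally without a nameidata. */
static int my_permission(struct inode_s *inode, int mask)
{
	return (inode->mode & mask) == (unsigned int)mask ? 0 : -EACCES;
}

/* Thin adapter matching the ops-table signature. */
static int my_iop_permission(struct inode_s *inode, int mask,
			     struct nameidata_s *nd)
{
	(void)nd;
	return my_permission(inode, mask);
}

struct inode_operations_s {
	int (*permission)(struct inode_s *, int, struct nameidata_s *);
};

static const struct inode_operations_s my_iops = {
	.permission = my_iop_permission,
};
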
diff --git a/fs/gfs2/ops_super.c b/fs/gfs2/ops_super.c
index 0b7cc920eb89..f66ea0f7a356 100644
--- a/fs/gfs2/ops_super.c
+++ b/fs/gfs2/ops_super.c
@@ -126,7 +126,7 @@ static void gfs2_put_super(struct super_block *sb)
126 gfs2_clear_rgrpd(sdp); 126 gfs2_clear_rgrpd(sdp);
127 gfs2_jindex_free(sdp); 127 gfs2_jindex_free(sdp);
128 /* Take apart glock structures and buffer lists */ 128 /* Take apart glock structures and buffer lists */
129 gfs2_gl_hash_clear(sdp, WAIT); 129 gfs2_gl_hash_clear(sdp);
130 /* Unmount the locking protocol */ 130 /* Unmount the locking protocol */
131 gfs2_lm_unmount(sdp); 131 gfs2_lm_unmount(sdp);
132 132
@@ -155,7 +155,7 @@ static void gfs2_write_super(struct super_block *sb)
155static int gfs2_sync_fs(struct super_block *sb, int wait) 155static int gfs2_sync_fs(struct super_block *sb, int wait)
156{ 156{
157 sb->s_dirt = 0; 157 sb->s_dirt = 0;
158 if (wait) 158 if (wait && sb->s_fs_info)
159 gfs2_log_flush(sb->s_fs_info, NULL); 159 gfs2_log_flush(sb->s_fs_info, NULL);
160 return 0; 160 return 0;
161} 161}
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index 56aaf915c59a..3e073f5144fa 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -904,7 +904,7 @@ static int need_sync(struct gfs2_quota_data *qd)
904 do_sync = 0; 904 do_sync = 0;
905 else { 905 else {
906 value *= gfs2_jindex_size(sdp) * num; 906 value *= gfs2_jindex_size(sdp) * num;
907 do_div(value, den); 907 value = div_s64(value, den);
908 value += (s64)be64_to_cpu(qd->qd_qb.qb_value); 908 value += (s64)be64_to_cpu(qd->qd_qb.qb_value);
909 if (value < (s64)be64_to_cpu(qd->qd_qb.qb_limit)) 909 if (value < (s64)be64_to_cpu(qd->qd_qb.qb_limit))
910 do_sync = 0; 910 do_sync = 0;
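
This one-line quota change is the do_div() fix from the merge summary: do_div() divides an unsigned 64-bit dividend in place, so passing it a quota delta that can go negative reinterprets the sign bit and yields a huge bogus quotient, whereas div_s64() divides signed values correctly. A userspace demonstration of the difference (the model_* helpers approximate the kernel semantics):

/* Sketch; model_* only mimic the kernel helpers' signedness. */
#include <stdint.h>
#include <stdio.h>

static uint64_t model_do_div(uint64_t n, uint32_t base) { return n / base; }
static int64_t  model_div_s64(int64_t n, int32_t base)  { return n / base; }

int main(void)
{
	int64_t value = -1000;	/* quota deltas can be negative */
	int32_t den = 4;

	printf("do_div-style:  %llu\n", (unsigned long long)
	       model_do_div((uint64_t)value, (uint32_t)den));
	printf("div_s64-style: %lld\n", (long long)
	       model_div_s64(value, den));
	return 0;	/* prints a huge positive number, then -250 */
}
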
diff --git a/fs/gfs2/recovery.c b/fs/gfs2/recovery.c
index 2888e4b4b1c5..d5e91f4f6a0b 100644
--- a/fs/gfs2/recovery.c
+++ b/fs/gfs2/recovery.c
@@ -428,6 +428,9 @@ static int clean_journal(struct gfs2_jdesc *jd, struct gfs2_log_header_host *hea
428static void gfs2_lm_recovery_done(struct gfs2_sbd *sdp, unsigned int jid, 428static void gfs2_lm_recovery_done(struct gfs2_sbd *sdp, unsigned int jid,
429 unsigned int message) 429 unsigned int message)
430{ 430{
431 if (!sdp->sd_lockstruct.ls_ops->lm_recovery_done)
432 return;
433
431 if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) 434 if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
432 sdp->sd_lockstruct.ls_ops->lm_recovery_done( 435 sdp->sd_lockstruct.ls_ops->lm_recovery_done(
433 sdp->sd_lockstruct.ls_lockspace, jid, message); 436 sdp->sd_lockstruct.ls_lockspace, jid, message);
@@ -505,7 +508,7 @@ int gfs2_recover_journal(struct gfs2_jdesc *jd)
505 508
506 error = gfs2_glock_nq_init(sdp->sd_trans_gl, LM_ST_SHARED, 509 error = gfs2_glock_nq_init(sdp->sd_trans_gl, LM_ST_SHARED,
507 LM_FLAG_NOEXP | LM_FLAG_PRIORITY | 510 LM_FLAG_NOEXP | LM_FLAG_PRIORITY |
508 GL_NOCANCEL | GL_NOCACHE, &t_gh); 511 GL_NOCACHE, &t_gh);
509 if (error) 512 if (error)
510 goto fail_gunlock_ji; 513 goto fail_gunlock_ji;
511 514
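
Since the built-in nolock path no longer has to supply every lock-module hook, callers such as gfs2_lm_recovery_done() above now probe the ops table before dereferencing. The optional-hook pattern, reduced to its essentials (names hypothetical):

/* Sketch of calling an optional ops-table hook. */
struct lm_ops {
	void (*recovery_done)(unsigned int jid);	/* may be NULL */
};

static void call_recovery_done(const struct lm_ops *ops, unsigned int jid)
{
	if (!ops->recovery_done)
		return;			/* e.g. the built-in nolock case */
	ops->recovery_done(jid);
}
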
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index 3401628d742b..2d90fb253505 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -371,11 +371,6 @@ static void clear_rgrpdi(struct gfs2_sbd *sdp)
371 371
372 spin_lock(&sdp->sd_rindex_spin); 372 spin_lock(&sdp->sd_rindex_spin);
373 sdp->sd_rindex_forward = NULL; 373 sdp->sd_rindex_forward = NULL;
374 head = &sdp->sd_rindex_recent_list;
375 while (!list_empty(head)) {
376 rgd = list_entry(head->next, struct gfs2_rgrpd, rd_recent);
377 list_del(&rgd->rd_recent);
378 }
379 spin_unlock(&sdp->sd_rindex_spin); 374 spin_unlock(&sdp->sd_rindex_spin);
380 375
381 head = &sdp->sd_rindex_list; 376 head = &sdp->sd_rindex_list;
@@ -945,107 +940,30 @@ static struct inode *try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked)
945} 940}
946 941
947/** 942/**
948 * recent_rgrp_first - get first RG from "recent" list
949 * @sdp: The GFS2 superblock
950 * @rglast: address of the rgrp used last
951 *
952 * Returns: The first rgrp in the recent list
953 */
954
955static struct gfs2_rgrpd *recent_rgrp_first(struct gfs2_sbd *sdp,
956 u64 rglast)
957{
958 struct gfs2_rgrpd *rgd;
959
960 spin_lock(&sdp->sd_rindex_spin);
961
962 if (rglast) {
963 list_for_each_entry(rgd, &sdp->sd_rindex_recent_list, rd_recent) {
964 if (rgrp_contains_block(rgd, rglast))
965 goto out;
966 }
967 }
968 rgd = NULL;
969 if (!list_empty(&sdp->sd_rindex_recent_list))
970 rgd = list_entry(sdp->sd_rindex_recent_list.next,
971 struct gfs2_rgrpd, rd_recent);
972out:
973 spin_unlock(&sdp->sd_rindex_spin);
974 return rgd;
975}
976
977/**
978 * recent_rgrp_next - get next RG from "recent" list 943 * recent_rgrp_next - get next RG from "recent" list
979 * @cur_rgd: current rgrp 944 * @cur_rgd: current rgrp
980 * @remove:
981 * 945 *
982 * Returns: The next rgrp in the recent list 946 * Returns: The next rgrp in the recent list
983 */ 947 */
984 948
985static struct gfs2_rgrpd *recent_rgrp_next(struct gfs2_rgrpd *cur_rgd, 949static struct gfs2_rgrpd *recent_rgrp_next(struct gfs2_rgrpd *cur_rgd)
986 int remove)
987{ 950{
988 struct gfs2_sbd *sdp = cur_rgd->rd_sbd; 951 struct gfs2_sbd *sdp = cur_rgd->rd_sbd;
989 struct list_head *head; 952 struct list_head *head;
990 struct gfs2_rgrpd *rgd; 953 struct gfs2_rgrpd *rgd;
991 954
992 spin_lock(&sdp->sd_rindex_spin); 955 spin_lock(&sdp->sd_rindex_spin);
993 956 head = &sdp->sd_rindex_mru_list;
994 head = &sdp->sd_rindex_recent_list; 957 if (unlikely(cur_rgd->rd_list_mru.next == head)) {
995 958 spin_unlock(&sdp->sd_rindex_spin);
996 list_for_each_entry(rgd, head, rd_recent) { 959 return NULL;
997 if (rgd == cur_rgd) {
998 if (cur_rgd->rd_recent.next != head)
999 rgd = list_entry(cur_rgd->rd_recent.next,
1000 struct gfs2_rgrpd, rd_recent);
1001 else
1002 rgd = NULL;
1003
1004 if (remove)
1005 list_del(&cur_rgd->rd_recent);
1006
1007 goto out;
1008 }
1009 } 960 }
1010 961 rgd = list_entry(cur_rgd->rd_list_mru.next, struct gfs2_rgrpd, rd_list_mru);
1011 rgd = NULL;
1012 if (!list_empty(head))
1013 rgd = list_entry(head->next, struct gfs2_rgrpd, rd_recent);
1014
1015out:
1016 spin_unlock(&sdp->sd_rindex_spin); 962 spin_unlock(&sdp->sd_rindex_spin);
1017 return rgd; 963 return rgd;
1018} 964}
1019 965
1020/** 966/**
1021 * recent_rgrp_add - add an RG to tail of "recent" list
1022 * @new_rgd: The rgrp to add
1023 *
1024 */
1025
1026static void recent_rgrp_add(struct gfs2_rgrpd *new_rgd)
1027{
1028 struct gfs2_sbd *sdp = new_rgd->rd_sbd;
1029 struct gfs2_rgrpd *rgd;
1030 unsigned int count = 0;
1031 unsigned int max = sdp->sd_rgrps / gfs2_jindex_size(sdp);
1032
1033 spin_lock(&sdp->sd_rindex_spin);
1034
1035 list_for_each_entry(rgd, &sdp->sd_rindex_recent_list, rd_recent) {
1036 if (rgd == new_rgd)
1037 goto out;
1038
1039 if (++count >= max)
1040 goto out;
1041 }
1042 list_add_tail(&new_rgd->rd_recent, &sdp->sd_rindex_recent_list);
1043
1044out:
1045 spin_unlock(&sdp->sd_rindex_spin);
1046}
1047
1048/**
1049 * forward_rgrp_get - get an rgrp to try next from full list 967 * forward_rgrp_get - get an rgrp to try next from full list
1050 * @sdp: The GFS2 superblock 968 * @sdp: The GFS2 superblock
1051 * 969 *
@@ -1112,9 +1030,7 @@ static struct inode *get_local_rgrp(struct gfs2_inode *ip, u64 *last_unlinked)
1112 int loops = 0; 1030 int loops = 0;
1113 int error, rg_locked; 1031 int error, rg_locked;
1114 1032
1115 /* Try recently successful rgrps */ 1033 rgd = gfs2_blk2rgrpd(sdp, ip->i_goal);
1116
1117 rgd = recent_rgrp_first(sdp, ip->i_goal);
1118 1034
1119 while (rgd) { 1035 while (rgd) {
1120 rg_locked = 0; 1036 rg_locked = 0;
@@ -1136,11 +1052,9 @@ static struct inode *get_local_rgrp(struct gfs2_inode *ip, u64 *last_unlinked)
1136 gfs2_glock_dq_uninit(&al->al_rgd_gh); 1052 gfs2_glock_dq_uninit(&al->al_rgd_gh);
1137 if (inode) 1053 if (inode)
1138 return inode; 1054 return inode;
1139 rgd = recent_rgrp_next(rgd, 1); 1055 /* fall through */
1140 break;
1141
1142 case GLR_TRYFAILED: 1056 case GLR_TRYFAILED:
1143 rgd = recent_rgrp_next(rgd, 0); 1057 rgd = recent_rgrp_next(rgd);
1144 break; 1058 break;
1145 1059
1146 default: 1060 default:
@@ -1199,7 +1113,9 @@ static struct inode *get_local_rgrp(struct gfs2_inode *ip, u64 *last_unlinked)
1199 1113
1200out: 1114out:
1201 if (begin) { 1115 if (begin) {
1202 recent_rgrp_add(rgd); 1116 spin_lock(&sdp->sd_rindex_spin);
1117 list_move(&rgd->rd_list_mru, &sdp->sd_rindex_mru_list);
1118 spin_unlock(&sdp->sd_rindex_spin);
1203 rgd = gfs2_rgrpd_get_next(rgd); 1119 rgd = gfs2_rgrpd_get_next(rgd);
1204 if (!rgd) 1120 if (!rgd)
1205 rgd = gfs2_rgrpd_get_first(sdp); 1121 rgd = gfs2_rgrpd_get_first(sdp);
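
The rgrp changes replace the bespoke "recent" list, with its capped add and remove-while-iterating logic, by a plain most-recently-used list: a successful allocation just list_move()s the rgrp to the head, and recent_rgrp_next() walks .next. A freestanding sketch of the move-to-front idiom on a minimal intrusive list (kernel-style shape, but self-contained userspace code):

/* Sketch; a tiny reimplementation of the list ops for illustration. */
#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void list_init(struct list_head *h) { h->next = h->prev = h; }

static void list_add(struct list_head *n, struct list_head *h)
{
	n->next = h->next;
	n->prev = h;
	h->next->prev = n;
	h->next = n;
}

static void list_del(struct list_head *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
}

/* The kernel's list_move(): unlink, then re-add at the head. */
static void list_move(struct list_head *n, struct list_head *h)
{
	list_del(n);
	list_add(n, h);
}

struct rgrp { struct list_head mru; int id; };

int main(void)
{
	struct list_head mru_list;
	struct rgrp a = { .id = 1 }, b = { .id = 2 }, c = { .id = 3 };

	list_init(&mru_list);
	list_add(&a.mru, &mru_list);
	list_add(&b.mru, &mru_list);
	list_add(&c.mru, &mru_list);	/* head-to-tail order: c b a */

	list_move(&a.mru, &mru_list);	/* a just used: order a c b */

	for (struct list_head *p = mru_list.next; p != &mru_list; p = p->next) {
		struct rgrp *r = (struct rgrp *)((char *)p -
						 offsetof(struct rgrp, mru));
		printf("%d ", r->id);	/* prints: 1 3 2 */
	}
	printf("\n");
	return 0;
}
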
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index 7aeacbc65f35..63a8a902d9db 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -65,7 +65,6 @@ void gfs2_tune_init(struct gfs2_tune *gt)
65 gt->gt_quota_quantum = 60; 65 gt->gt_quota_quantum = 60;
66 gt->gt_atime_quantum = 3600; 66 gt->gt_atime_quantum = 3600;
67 gt->gt_new_files_jdata = 0; 67 gt->gt_new_files_jdata = 0;
68 gt->gt_new_files_directio = 0;
69 gt->gt_max_readahead = 1 << 18; 68 gt->gt_max_readahead = 1 << 18;
70 gt->gt_stall_secs = 600; 69 gt->gt_stall_secs = 600;
71 gt->gt_complain_secs = 10; 70 gt->gt_complain_secs = 10;
@@ -941,8 +940,7 @@ static int gfs2_lock_fs_check_clean(struct gfs2_sbd *sdp,
941 } 940 }
942 941
943 error = gfs2_glock_nq_init(sdp->sd_trans_gl, LM_ST_DEFERRED, 942 error = gfs2_glock_nq_init(sdp->sd_trans_gl, LM_ST_DEFERRED,
944 LM_FLAG_PRIORITY | GL_NOCACHE, 943 GL_NOCACHE, t_gh);
945 t_gh);
946 944
947 list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) { 945 list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
948 error = gfs2_jdesc_check(jd); 946 error = gfs2_jdesc_check(jd);
diff --git a/fs/gfs2/sys.c b/fs/gfs2/sys.c
index 9ab9fc85ecd0..74846559fc3f 100644
--- a/fs/gfs2/sys.c
+++ b/fs/gfs2/sys.c
@@ -110,18 +110,6 @@ static ssize_t statfs_sync_store(struct gfs2_sbd *sdp, const char *buf,
110 return len; 110 return len;
111} 111}
112 112
113static ssize_t shrink_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
114{
115 if (!capable(CAP_SYS_ADMIN))
116 return -EACCES;
117
118 if (simple_strtol(buf, NULL, 0) != 1)
119 return -EINVAL;
120
121 gfs2_gl_hash_clear(sdp, NO_WAIT);
122 return len;
123}
124
125static ssize_t quota_sync_store(struct gfs2_sbd *sdp, const char *buf, 113static ssize_t quota_sync_store(struct gfs2_sbd *sdp, const char *buf,
126 size_t len) 114 size_t len)
127{ 115{
@@ -175,7 +163,6 @@ static struct gfs2_attr gfs2_attr_##name = __ATTR(name, mode, show, store)
175GFS2_ATTR(id, 0444, id_show, NULL); 163GFS2_ATTR(id, 0444, id_show, NULL);
176GFS2_ATTR(fsname, 0444, fsname_show, NULL); 164GFS2_ATTR(fsname, 0444, fsname_show, NULL);
177GFS2_ATTR(freeze, 0644, freeze_show, freeze_store); 165GFS2_ATTR(freeze, 0644, freeze_show, freeze_store);
178GFS2_ATTR(shrink, 0200, NULL, shrink_store);
179GFS2_ATTR(withdraw, 0644, withdraw_show, withdraw_store); 166GFS2_ATTR(withdraw, 0644, withdraw_show, withdraw_store);
180GFS2_ATTR(statfs_sync, 0200, NULL, statfs_sync_store); 167GFS2_ATTR(statfs_sync, 0200, NULL, statfs_sync_store);
181GFS2_ATTR(quota_sync, 0200, NULL, quota_sync_store); 168GFS2_ATTR(quota_sync, 0200, NULL, quota_sync_store);
@@ -186,7 +173,6 @@ static struct attribute *gfs2_attrs[] = {
186 &gfs2_attr_id.attr, 173 &gfs2_attr_id.attr,
187 &gfs2_attr_fsname.attr, 174 &gfs2_attr_fsname.attr,
188 &gfs2_attr_freeze.attr, 175 &gfs2_attr_freeze.attr,
189 &gfs2_attr_shrink.attr,
190 &gfs2_attr_withdraw.attr, 176 &gfs2_attr_withdraw.attr,
191 &gfs2_attr_statfs_sync.attr, 177 &gfs2_attr_statfs_sync.attr,
192 &gfs2_attr_quota_sync.attr, 178 &gfs2_attr_quota_sync.attr,
@@ -426,7 +412,6 @@ TUNE_ATTR(max_readahead, 0);
426TUNE_ATTR(complain_secs, 0); 412TUNE_ATTR(complain_secs, 0);
427TUNE_ATTR(statfs_slow, 0); 413TUNE_ATTR(statfs_slow, 0);
428TUNE_ATTR(new_files_jdata, 0); 414TUNE_ATTR(new_files_jdata, 0);
429TUNE_ATTR(new_files_directio, 0);
430TUNE_ATTR(quota_simul_sync, 1); 415TUNE_ATTR(quota_simul_sync, 1);
431TUNE_ATTR(quota_cache_secs, 1); 416TUNE_ATTR(quota_cache_secs, 1);
432TUNE_ATTR(stall_secs, 1); 417TUNE_ATTR(stall_secs, 1);
@@ -455,7 +440,6 @@ static struct attribute *tune_attrs[] = {
455 &tune_attr_quotad_secs.attr, 440 &tune_attr_quotad_secs.attr,
456 &tune_attr_quota_scale.attr, 441 &tune_attr_quota_scale.attr,
457 &tune_attr_new_files_jdata.attr, 442 &tune_attr_new_files_jdata.attr,
458 &tune_attr_new_files_directio.attr,
459 NULL, 443 NULL,
460}; 444};
461 445