author    Steven Whitehouse <swhiteho@redhat.com>    2008-05-21 12:03:22 -0400
committer Steven Whitehouse <swhiteho@redhat.com>    2008-06-27 04:39:22 -0400
commit    6802e3400ff4549525930ee744030c36fce9cc73 (patch)
tree      db889bf5337c1d3bb12ebbf571c3c1cad1040496 /fs/gfs2
parent    543cf4cb3fe6f6cae3651ba918b9c56200b257d0 (diff)
[GFS2] Clean up the glock core
This patch implements a number of cleanups to the core of the GFS2 glock code. As a result a lot of code is removed. It looks like a really big change, but actually a large part of this patch is either removing or moving existing code.

There are some new bits too though, such as the new run_queue() function which is considerably streamlined. Highlights of this patch include:

 o Fixes a cluster coherency bug during SH -> EX lock conversions
 o Removes the "glmutex" code in favour of a single bit lock
 o Removes the ->go_xmote_bh() for inodes since it was duplicating ->go_lock()
 o We now only use the ->lm_lock() function for both locks and unlocks (i.e. unlock is a lock with target mode LM_ST_UNLOCKED)
 o The fast path is considerably shortened, giving performance gains especially with lock_nolock
 o The glock_workqueue is now used for all the callbacks from the DLM, which allows us to simplify the lock_dlm module (see following patch)
 o The way is now open to make further changes such as eliminating the two threads (gfs2_glockd and gfs2_scand) in favour of a more efficient scheme

This patch has undergone extensive testing with various test suites, so it should be pretty stable by now.

Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
Cc: Bob Peterson <rpeterso@redhat.com>
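[Editor's note] The "single bit lock" mentioned above is the GLF_LOCK bit in gl_flags: the patch drops the glmutex (a mutex emulated with the gl_waiters1 holder list) and instead takes the lock with test_and_set_bit() and drops it with clear_bit(), as seen in the new run_queue() below. A minimal userspace analogue of that pattern using C11 atomics — names and the bit index here are illustrative, not taken from the patch:

#include <stdatomic.h>
#include <stdbool.h>

#define GLF_LOCK 1  /* bit index; illustrative, not the value from incore.h */

/* Analogue of test_and_set_bit(GLF_LOCK, &gl->gl_flags):
 * returns true if we took the bit lock. */
static bool glf_trylock(atomic_ulong *flags)
{
	unsigned long bit = 1UL << GLF_LOCK;
	return (atomic_fetch_or(flags, bit) & bit) == 0;
}

/* Analogue of clear_bit(GLF_LOCK, &gl->gl_flags). */
static void glf_unlock(atomic_ulong *flags)
{
	atomic_fetch_and(flags, ~(1UL << GLF_LOCK));
}

The uncontended path is a single atomic operation with no holder allocation or wait-list manipulation, which is part of what shortens the fast path.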
Diffstat (limited to 'fs/gfs2')
-rw-r--r--  fs/gfs2/glock.c                1611
-rw-r--r--  fs/gfs2/glock.h                   9
-rw-r--r--  fs/gfs2/glops.c                  70
-rw-r--r--  fs/gfs2/incore.h                 35
-rw-r--r--  fs/gfs2/locking/dlm/lock.c        3
-rw-r--r--  fs/gfs2/locking/nolock/main.c     2
-rw-r--r--  fs/gfs2/main.c                    2
-rw-r--r--  fs/gfs2/meta_io.c                14
-rw-r--r--  fs/gfs2/meta_io.h                 1
-rw-r--r--  fs/gfs2/ops_address.c            25
-rw-r--r--  fs/gfs2/ops_file.c                8
-rw-r--r--  fs/gfs2/recovery.c                2
-rw-r--r--  fs/gfs2/super.c                   3
13 files changed, 736 insertions, 1049 deletions
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index d636b3e80f5d..519a54cc0b7b 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -45,21 +45,19 @@ struct gfs2_gl_hash_bucket {
45 struct hlist_head hb_list; 45 struct hlist_head hb_list;
46}; 46};
47 47
48struct glock_iter { 48struct gfs2_glock_iter {
49 int hash; /* hash bucket index */ 49 int hash; /* hash bucket index */
50 struct gfs2_sbd *sdp; /* incore superblock */ 50 struct gfs2_sbd *sdp; /* incore superblock */
51 struct gfs2_glock *gl; /* current glock struct */ 51 struct gfs2_glock *gl; /* current glock struct */
52 struct seq_file *seq; /* sequence file for debugfs */ 52 char string[512]; /* scratch space */
53 char string[512]; /* scratch space */
54}; 53};
55 54
56typedef void (*glock_examiner) (struct gfs2_glock * gl); 55typedef void (*glock_examiner) (struct gfs2_glock * gl);
57 56
58static int gfs2_dump_lockstate(struct gfs2_sbd *sdp); 57static int gfs2_dump_lockstate(struct gfs2_sbd *sdp);
59static int dump_glock(struct glock_iter *gi, struct gfs2_glock *gl); 58static int __dump_glock(struct seq_file *seq, const struct gfs2_glock *gl);
60static void gfs2_glock_xmote_th(struct gfs2_glock *gl, struct gfs2_holder *gh); 59#define GLOCK_BUG_ON(gl,x) do { if (unlikely(x)) { __dump_glock(NULL, gl); BUG(); } } while(0)
61static void gfs2_glock_drop_th(struct gfs2_glock *gl); 60static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target);
62static void run_queue(struct gfs2_glock *gl);
63 61
64static DECLARE_RWSEM(gfs2_umount_flush_sem); 62static DECLARE_RWSEM(gfs2_umount_flush_sem);
65static struct dentry *gfs2_root; 63static struct dentry *gfs2_root;
@@ -123,33 +121,6 @@ static inline rwlock_t *gl_lock_addr(unsigned int x)
123#endif 121#endif
124 122
125/** 123/**
126 * relaxed_state_ok - is a requested lock compatible with the current lock mode?
127 * @actual: the current state of the lock
128 * @requested: the lock state that was requested by the caller
129 * @flags: the modifier flags passed in by the caller
130 *
131 * Returns: 1 if the locks are compatible, 0 otherwise
132 */
133
134static inline int relaxed_state_ok(unsigned int actual, unsigned requested,
135 int flags)
136{
137 if (actual == requested)
138 return 1;
139
140 if (flags & GL_EXACT)
141 return 0;
142
143 if (actual == LM_ST_EXCLUSIVE && requested == LM_ST_SHARED)
144 return 1;
145
146 if (actual != LM_ST_UNLOCKED && (flags & LM_FLAG_ANY))
147 return 1;
148
149 return 0;
150}
151
152/**
153 * gl_hash() - Turn glock number into hash bucket number 124 * gl_hash() - Turn glock number into hash bucket number
154 * @lock: The glock number 125 * @lock: The glock number
155 * 126 *
@@ -211,17 +182,14 @@ static void gfs2_glock_hold(struct gfs2_glock *gl)
211int gfs2_glock_put(struct gfs2_glock *gl) 182int gfs2_glock_put(struct gfs2_glock *gl)
212{ 183{
213 int rv = 0; 184 int rv = 0;
214 struct gfs2_sbd *sdp = gl->gl_sbd;
215 185
216 write_lock(gl_lock_addr(gl->gl_hash)); 186 write_lock(gl_lock_addr(gl->gl_hash));
217 if (atomic_dec_and_test(&gl->gl_ref)) { 187 if (atomic_dec_and_test(&gl->gl_ref)) {
218 hlist_del(&gl->gl_list); 188 hlist_del(&gl->gl_list);
219 write_unlock(gl_lock_addr(gl->gl_hash)); 189 write_unlock(gl_lock_addr(gl->gl_hash));
220 gfs2_assert(sdp, gl->gl_state == LM_ST_UNLOCKED); 190 GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_UNLOCKED);
221 gfs2_assert(sdp, list_empty(&gl->gl_reclaim)); 191 GLOCK_BUG_ON(gl, !list_empty(&gl->gl_reclaim));
222 gfs2_assert(sdp, list_empty(&gl->gl_holders)); 192 GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
223 gfs2_assert(sdp, list_empty(&gl->gl_waiters1));
224 gfs2_assert(sdp, list_empty(&gl->gl_waiters3));
225 glock_free(gl); 193 glock_free(gl);
226 rv = 1; 194 rv = 1;
227 goto out; 195 goto out;
@@ -281,16 +249,382 @@ static struct gfs2_glock *gfs2_glock_find(const struct gfs2_sbd *sdp,
281 return gl; 249 return gl;
282} 250}
283 251
252/**
253 * may_grant - check if its ok to grant a new lock
254 * @gl: The glock
255 * @gh: The lock request which we wish to grant
256 *
257 * Returns: true if its ok to grant the lock
258 */
259
260static inline int may_grant(const struct gfs2_glock *gl, const struct gfs2_holder *gh)
261{
262 const struct gfs2_holder *gh_head = list_entry(gl->gl_holders.next, const struct gfs2_holder, gh_list);
263 if ((gh->gh_state == LM_ST_EXCLUSIVE ||
264 gh_head->gh_state == LM_ST_EXCLUSIVE) && gh != gh_head)
265 return 0;
266 if (gl->gl_state == gh->gh_state)
267 return 1;
268 if (gh->gh_flags & GL_EXACT)
269 return 0;
270 if (gh->gh_state == LM_ST_SHARED && gl->gl_state == LM_ST_EXCLUSIVE)
271 return 1;
272 if (gl->gl_state != LM_ST_UNLOCKED && (gh->gh_flags & LM_FLAG_ANY))
273 return 1;
274 return 0;
275}
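
[Editor's note] may_grant() above is the compatibility test that replaces the deleted relaxed_state_ok(), extended with the rule that an EX holder never shares the queue head. A standalone restatement with the queue-position tests reduced to two booleans — numeric values of the states and flags here are illustrative, not copied from the GFS2 headers:

#include <stdbool.h>
#include <stdio.h>

enum { LM_ST_UNLOCKED, LM_ST_EXCLUSIVE, LM_ST_DEFERRED, LM_ST_SHARED };
#define GL_EXACT    0x1
#define LM_FLAG_ANY 0x2

/* is_head: gh is the first holder on gl_holders;
 * head_is_ex: the first holder wants LM_ST_EXCLUSIVE. */
static bool may_grant(unsigned gl_state, unsigned gh_state, unsigned gh_flags,
                      bool is_head, bool head_is_ex)
{
	if ((gh_state == LM_ST_EXCLUSIVE || head_is_ex) && !is_head)
		return false;            /* EX never shares the queue head */
	if (gl_state == gh_state)
		return true;             /* already in the requested mode */
	if (gh_flags & GL_EXACT)
		return false;            /* exact mode demanded, not held */
	if (gh_state == LM_ST_SHARED && gl_state == LM_ST_EXCLUSIVE)
		return true;             /* EX satisfies an SH request */
	if (gl_state != LM_ST_UNLOCKED && (gh_flags & LM_FLAG_ANY))
		return true;             /* any locked mode will do */
	return false;
}

int main(void)
{
	/* SH request while the glock is held EX: granted locally... */
	printf("%d\n", may_grant(LM_ST_EXCLUSIVE, LM_ST_SHARED, 0, true, false));
	/* ...unless GL_EXACT insists on exactly SH: not granted. */
	printf("%d\n", may_grant(LM_ST_EXCLUSIVE, LM_ST_SHARED, GL_EXACT, true, false));
	return 0;
}
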
276
277static void gfs2_holder_wake(struct gfs2_holder *gh)
278{
279 clear_bit(HIF_WAIT, &gh->gh_iflags);
280 smp_mb__after_clear_bit();
281 wake_up_bit(&gh->gh_iflags, HIF_WAIT);
282}
283
284/**
285 * do_promote - promote as many requests as possible on the current queue
286 * @gl: The glock
287 *
288 * Returns: true if there is a blocked holder at the head of the list
289 */
290
291static int do_promote(struct gfs2_glock *gl)
292{
293 const struct gfs2_glock_operations *glops = gl->gl_ops;
294 struct gfs2_holder *gh, *tmp;
295 int ret;
296
297restart:
298 list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
299 if (test_bit(HIF_HOLDER, &gh->gh_iflags))
300 continue;
301 if (may_grant(gl, gh)) {
302 if (gh->gh_list.prev == &gl->gl_holders &&
303 glops->go_lock) {
304 spin_unlock(&gl->gl_spin);
305 /* FIXME: eliminate this eventually */
306 ret = glops->go_lock(gh);
307 spin_lock(&gl->gl_spin);
308 if (ret) {
309 gh->gh_error = ret;
310 list_del_init(&gh->gh_list);
311 gfs2_holder_wake(gh);
312 goto restart;
313 }
314 set_bit(HIF_HOLDER, &gh->gh_iflags);
315 gfs2_holder_wake(gh);
316 goto restart;
317 }
318 set_bit(HIF_HOLDER, &gh->gh_iflags);
319 gfs2_holder_wake(gh);
320 continue;
321 }
322 if (gh->gh_list.prev == &gl->gl_holders)
323 return 1;
324 break;
325 }
326 return 0;
327}
328
329/**
330 * do_error - Something unexpected has happened during a lock request
331 *
332 */
333
334static inline void do_error(struct gfs2_glock *gl, const int ret)
335{
336 struct gfs2_holder *gh, *tmp;
337
338 list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
339 if (test_bit(HIF_HOLDER, &gh->gh_iflags))
340 continue;
341 if (ret & LM_OUT_ERROR)
342 gh->gh_error = -EIO;
343 else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))
344 gh->gh_error = GLR_TRYFAILED;
345 else
346 continue;
347 list_del_init(&gh->gh_list);
348 gfs2_holder_wake(gh);
349 }
350}
351
352/**
353 * find_first_waiter - find the first gh that's waiting for the glock
354 * @gl: the glock
355 */
356
357static inline struct gfs2_holder *find_first_waiter(const struct gfs2_glock *gl)
358{
359 struct gfs2_holder *gh;
360
361 list_for_each_entry(gh, &gl->gl_holders, gh_list) {
362 if (!test_bit(HIF_HOLDER, &gh->gh_iflags))
363 return gh;
364 }
365 return NULL;
366}
367
368/**
369 * state_change - record that the glock is now in a different state
370 * @gl: the glock
371 * @new_state the new state
372 *
373 */
374
375static void state_change(struct gfs2_glock *gl, unsigned int new_state)
376{
377 int held1, held2;
378
379 held1 = (gl->gl_state != LM_ST_UNLOCKED);
380 held2 = (new_state != LM_ST_UNLOCKED);
381
382 if (held1 != held2) {
383 if (held2)
384 gfs2_glock_hold(gl);
385 else
386 gfs2_glock_put(gl);
387 }
388
389 gl->gl_state = new_state;
390 gl->gl_tchange = jiffies;
391}
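
[Editor's note] One point worth making explicit about state_change(): a glock held in any DLM mode pins exactly one reference, so only transitions that cross the unlocked boundary touch the refcount:

/* UN -> SH/DF/EX : gfs2_glock_hold(gl)  (held2 && !held1)
 * SH/DF/EX -> UN : gfs2_glock_put(gl)   (held1 && !held2)
 * SH -> EX, etc. : no refcount change   (held on both sides)
 */
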
392
393static void gfs2_demote_wake(struct gfs2_glock *gl)
394{
395 gl->gl_demote_state = LM_ST_EXCLUSIVE;
396 clear_bit(GLF_DEMOTE, &gl->gl_flags);
397 smp_mb__after_clear_bit();
398 wake_up_bit(&gl->gl_flags, GLF_DEMOTE);
399}
400
401/**
402 * finish_xmote - The DLM has replied to one of our lock requests
403 * @gl: The glock
404 * @ret: The status from the DLM
405 *
406 */
407
408static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
409{
410 const struct gfs2_glock_operations *glops = gl->gl_ops;
411 struct gfs2_holder *gh;
412 unsigned state = ret & LM_OUT_ST_MASK;
413
414 spin_lock(&gl->gl_spin);
415 state_change(gl, state);
416 gh = find_first_waiter(gl);
417
418 /* Demote to UN request arrived during demote to SH or DF */
419 if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) &&
420 state != LM_ST_UNLOCKED && gl->gl_demote_state == LM_ST_UNLOCKED)
421 gl->gl_target = LM_ST_UNLOCKED;
422
423 /* Check for state != intended state */
424 if (unlikely(state != gl->gl_target)) {
425 if (gh && !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) {
426 /* move to back of queue and try next entry */
427 if (ret & LM_OUT_CANCELED) {
428 if ((gh->gh_flags & LM_FLAG_PRIORITY) == 0)
429 list_move_tail(&gh->gh_list, &gl->gl_holders);
430 gh = find_first_waiter(gl);
431 gl->gl_target = gh->gh_state;
432 goto retry;
433 }
434 /* Some error or failed "try lock" - report it */
435 if ((ret & LM_OUT_ERROR) ||
436 (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) {
437 gl->gl_target = gl->gl_state;
438 do_error(gl, ret);
439 goto out;
440 }
441 }
442 switch(state) {
443 /* Unlocked due to conversion deadlock, try again */
444 case LM_ST_UNLOCKED:
445retry:
446 do_xmote(gl, gh, gl->gl_target);
447 break;
448 /* Conversion fails, unlock and try again */
449 case LM_ST_SHARED:
450 case LM_ST_DEFERRED:
451 do_xmote(gl, gh, LM_ST_UNLOCKED);
452 break;
453 default: /* Everything else */
454 printk(KERN_ERR "GFS2: wanted %u got %u\n", gl->gl_target, state);
455 GLOCK_BUG_ON(gl, 1);
456 }
457 spin_unlock(&gl->gl_spin);
458 gfs2_glock_put(gl);
459 return;
460 }
461
462 /* Fast path - we got what we asked for */
463 if (test_and_clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags))
464 gfs2_demote_wake(gl);
465 if (state != LM_ST_UNLOCKED) {
466 if (glops->go_xmote_bh) {
467 int rv;
468 spin_unlock(&gl->gl_spin);
469 rv = glops->go_xmote_bh(gl, gh);
470 if (rv == -EAGAIN)
471 return;
472 spin_lock(&gl->gl_spin);
473 if (rv) {
474 do_error(gl, rv);
475 goto out;
476 }
477 }
478 do_promote(gl);
479 }
480out:
481 clear_bit(GLF_LOCK, &gl->gl_flags);
482 spin_unlock(&gl->gl_spin);
483 gfs2_glock_put(gl);
484}
485
486static unsigned int gfs2_lm_lock(struct gfs2_sbd *sdp, void *lock,
487 unsigned int cur_state, unsigned int req_state,
488 unsigned int flags)
489{
490 int ret = LM_OUT_ERROR;
491 if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
492 ret = sdp->sd_lockstruct.ls_ops->lm_lock(lock, cur_state,
493 req_state, flags);
494 return ret;
495}
496
497/**
498 * do_xmote - Calls the DLM to change the state of a lock
499 * @gl: The lock state
500 * @gh: The holder (only for promotes)
501 * @target: The target lock state
502 *
503 */
504
505static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target)
506{
507 const struct gfs2_glock_operations *glops = gl->gl_ops;
508 struct gfs2_sbd *sdp = gl->gl_sbd;
509 unsigned int lck_flags = gh ? gh->gh_flags : 0;
510 int ret;
511
512 lck_flags &= (LM_FLAG_TRY | LM_FLAG_TRY_1CB | LM_FLAG_NOEXP |
513 LM_FLAG_PRIORITY);
514 BUG_ON(gl->gl_state == target);
515 BUG_ON(gl->gl_state == gl->gl_target);
516 if ((target == LM_ST_UNLOCKED || target == LM_ST_DEFERRED) &&
517 glops->go_inval) {
518 set_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
519 do_error(gl, 0); /* Fail queued try locks */
520 }
521 spin_unlock(&gl->gl_spin);
522 if (glops->go_xmote_th)
523 glops->go_xmote_th(gl);
524 if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
525 glops->go_inval(gl, target == LM_ST_DEFERRED ? 0 : DIO_METADATA);
526 clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
527
528 gfs2_glock_hold(gl);
529 if (target != LM_ST_UNLOCKED && (gl->gl_state == LM_ST_SHARED ||
530 gl->gl_state == LM_ST_DEFERRED) &&
531 !(lck_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
532 lck_flags |= LM_FLAG_TRY_1CB;
533 ret = gfs2_lm_lock(sdp, gl->gl_lock, gl->gl_state, target, lck_flags);
534
535 if (!(ret & LM_OUT_ASYNC)) {
536 finish_xmote(gl, ret);
537 gfs2_glock_hold(gl);
538 if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
539 gfs2_glock_put(gl);
540 } else {
541 GLOCK_BUG_ON(gl, ret != LM_OUT_ASYNC);
542 }
543 spin_lock(&gl->gl_spin);
544}
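
[Editor's note] The lck_flags adjustment above — forcing LM_FLAG_TRY_1CB onto any SH or DF conversion that is not an unlock — reads as the mechanical half of the SH -> EX coherency fix listed in the commit message (this is an inference from the diff, not something stated in it). Condensed, with cur standing in for gl->gl_state:

/* Sketch of the rule, not a verbatim excerpt: */
if (target != LM_ST_UNLOCKED &&
    (cur == LM_ST_SHARED || cur == LM_ST_DEFERRED) &&
    !(lck_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
	lck_flags |= LM_FLAG_TRY_1CB;

/* If the DLM cannot grant the "try" conversion, finish_xmote() sees
 * LM_OUT_CANCELED or an unexpected SH/DF result and retries through
 * a full unlock ("Conversion fails, unlock and try again" above)
 * rather than blocking on an in-place conversion. */
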
545
546/**
547 * find_first_holder - find the first "holder" gh
548 * @gl: the glock
549 */
550
551static inline struct gfs2_holder *find_first_holder(const struct gfs2_glock *gl)
552{
553 struct gfs2_holder *gh;
554
555 if (!list_empty(&gl->gl_holders)) {
556 gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
557 if (test_bit(HIF_HOLDER, &gh->gh_iflags))
558 return gh;
559 }
560 return NULL;
561}
562
563/**
564 * run_queue - do all outstanding tasks related to a glock
565 * @gl: The glock in question
566 * @nonblock: True if we must not block in run_queue
567 *
568 */
569
570static void run_queue(struct gfs2_glock *gl, const int nonblock)
571{
572 struct gfs2_holder *gh = NULL;
573
574 if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
575 return;
576
577 GLOCK_BUG_ON(gl, test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags));
578
579 if (test_bit(GLF_DEMOTE, &gl->gl_flags) &&
580 gl->gl_demote_state != gl->gl_state) {
581 if (find_first_holder(gl))
582 goto out;
583 if (nonblock)
584 goto out_sched;
585 set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
586 gl->gl_target = gl->gl_demote_state;
587 } else {
588 if (test_bit(GLF_DEMOTE, &gl->gl_flags))
589 gfs2_demote_wake(gl);
590 if (do_promote(gl) == 0)
591 goto out;
592 gh = find_first_waiter(gl);
593 gl->gl_target = gh->gh_state;
594 if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
595 do_error(gl, 0); /* Fail queued try locks */
596 }
597 do_xmote(gl, gh, gl->gl_target);
598 return;
599
600out_sched:
601 gfs2_glock_hold(gl);
602 if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
603 gfs2_glock_put(gl);
604out:
605 clear_bit(GLF_LOCK, &gl->gl_flags);
606}
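
[Editor's note] To make the new nonblock parameter concrete, these are the only two call patterns this patch gives run_queue(), both visible elsewhere in this diff:

/* Caller context (gfs2_glock_nq(), later in this diff) -- a pending
 * demote is never started here; it is punted to the workqueue via
 * out_sched:
 *
 *     spin_lock(&gl->gl_spin);
 *     add_to_queue(gh);
 *     run_queue(gl, 1);
 *     spin_unlock(&gl->gl_spin);
 *
 * Workqueue context (glock_work_func(), next function) -- free to
 * start the demote directly through do_xmote():
 *
 *     run_queue(gl, 0);
 */
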
607
284static void glock_work_func(struct work_struct *work) 608static void glock_work_func(struct work_struct *work)
285{ 609{
610 unsigned long delay = 0;
286 struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work); 611 struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work);
287 612
613 if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags))
614 finish_xmote(gl, gl->gl_reply);
288 spin_lock(&gl->gl_spin); 615 spin_lock(&gl->gl_spin);
289 if (test_and_clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags)) 616 if (test_and_clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags)) {
290 set_bit(GLF_DEMOTE, &gl->gl_flags); 617 unsigned long holdtime, now = jiffies;
291 run_queue(gl); 618 holdtime = gl->gl_tchange + gl->gl_ops->go_min_hold_time;
619 if (time_before(now, holdtime))
620 delay = holdtime - now;
621 set_bit(delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE, &gl->gl_flags);
622 }
623 run_queue(gl, 0);
292 spin_unlock(&gl->gl_spin); 624 spin_unlock(&gl->gl_spin);
293 gfs2_glock_put(gl); 625 if (!delay ||
626 queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
627 gfs2_glock_put(gl);
294} 628}
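
[Editor's note] The demote-delay arithmetic in glock_work_func() above, with plain integers standing in for jiffies (all values hypothetical; the real code uses time_before() so the comparison survives jiffies wraparound):

#include <stdio.h>

int main(void)
{
	unsigned long go_min_hold_time = 100; /* per-glock-type constant */
	unsigned long gl_tchange = 5000;      /* jiffies at last state change */
	unsigned long now = 5040;

	unsigned long holdtime = gl_tchange + go_min_hold_time;
	unsigned long delay = (now < holdtime) ? holdtime - now : 0;

	/* With delay != 0 the demote stays pending (GLF_PENDING_DEMOTE)
	 * and the work is requeued 60 ticks out, enforcing a minimum
	 * hold time before the glock is demoted. */
	printf("delay = %lu\n", delay); /* prints: delay = 60 */
	return 0;
}
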
295 629
296static int gfs2_lm_get_lock(struct gfs2_sbd *sdp, struct lm_lockname *name, 630static int gfs2_lm_get_lock(struct gfs2_sbd *sdp, struct lm_lockname *name,
@@ -342,12 +676,10 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
342 gl->gl_name = name; 676 gl->gl_name = name;
343 atomic_set(&gl->gl_ref, 1); 677 atomic_set(&gl->gl_ref, 1);
344 gl->gl_state = LM_ST_UNLOCKED; 678 gl->gl_state = LM_ST_UNLOCKED;
679 gl->gl_target = LM_ST_UNLOCKED;
345 gl->gl_demote_state = LM_ST_EXCLUSIVE; 680 gl->gl_demote_state = LM_ST_EXCLUSIVE;
346 gl->gl_hash = hash; 681 gl->gl_hash = hash;
347 gl->gl_owner_pid = NULL;
348 gl->gl_ip = 0;
349 gl->gl_ops = glops; 682 gl->gl_ops = glops;
350 gl->gl_req_gh = NULL;
351 gl->gl_stamp = jiffies; 683 gl->gl_stamp = jiffies;
352 gl->gl_tchange = jiffies; 684 gl->gl_tchange = jiffies;
353 gl->gl_object = NULL; 685 gl->gl_object = NULL;
@@ -447,13 +779,6 @@ void gfs2_holder_uninit(struct gfs2_holder *gh)
447 gh->gh_ip = 0; 779 gh->gh_ip = 0;
448} 780}
449 781
450static void gfs2_holder_wake(struct gfs2_holder *gh)
451{
452 clear_bit(HIF_WAIT, &gh->gh_iflags);
453 smp_mb__after_clear_bit();
454 wake_up_bit(&gh->gh_iflags, HIF_WAIT);
455}
456
457static int just_schedule(void *word) 782static int just_schedule(void *word)
458{ 783{
459 schedule(); 784 schedule();
@@ -466,14 +791,6 @@ static void wait_on_holder(struct gfs2_holder *gh)
466 wait_on_bit(&gh->gh_iflags, HIF_WAIT, just_schedule, TASK_UNINTERRUPTIBLE); 791 wait_on_bit(&gh->gh_iflags, HIF_WAIT, just_schedule, TASK_UNINTERRUPTIBLE);
467} 792}
468 793
469static void gfs2_demote_wake(struct gfs2_glock *gl)
470{
471 gl->gl_demote_state = LM_ST_EXCLUSIVE;
472 clear_bit(GLF_DEMOTE, &gl->gl_flags);
473 smp_mb__after_clear_bit();
474 wake_up_bit(&gl->gl_flags, GLF_DEMOTE);
475}
476
477static void wait_on_demote(struct gfs2_glock *gl) 794static void wait_on_demote(struct gfs2_glock *gl)
478{ 795{
479 might_sleep(); 796 might_sleep();
@@ -481,217 +798,6 @@ static void wait_on_demote(struct gfs2_glock *gl)
481} 798}
482 799
483/** 800/**
484 * rq_mutex - process a mutex request in the queue
485 * @gh: the glock holder
486 *
487 * Returns: 1 if the queue is blocked
488 */
489
490static int rq_mutex(struct gfs2_holder *gh)
491{
492 struct gfs2_glock *gl = gh->gh_gl;
493
494 list_del_init(&gh->gh_list);
495 /* gh->gh_error never examined. */
496 set_bit(GLF_LOCK, &gl->gl_flags);
497 clear_bit(HIF_WAIT, &gh->gh_iflags);
498 smp_mb();
499 wake_up_bit(&gh->gh_iflags, HIF_WAIT);
500
501 return 1;
502}
503
504/**
505 * rq_promote - process a promote request in the queue
506 * @gh: the glock holder
507 *
508 * Acquire a new inter-node lock, or change a lock state to more restrictive.
509 *
510 * Returns: 1 if the queue is blocked
511 */
512
513static int rq_promote(struct gfs2_holder *gh)
514{
515 struct gfs2_glock *gl = gh->gh_gl;
516
517 if (!relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
518 if (list_empty(&gl->gl_holders)) {
519 gl->gl_req_gh = gh;
520 set_bit(GLF_LOCK, &gl->gl_flags);
521 spin_unlock(&gl->gl_spin);
522 gfs2_glock_xmote_th(gh->gh_gl, gh);
523 spin_lock(&gl->gl_spin);
524 }
525 return 1;
526 }
527
528 if (list_empty(&gl->gl_holders)) {
529 set_bit(HIF_FIRST, &gh->gh_iflags);
530 set_bit(GLF_LOCK, &gl->gl_flags);
531 } else {
532 struct gfs2_holder *next_gh;
533 if (gh->gh_state == LM_ST_EXCLUSIVE)
534 return 1;
535 next_gh = list_entry(gl->gl_holders.next, struct gfs2_holder,
536 gh_list);
537 if (next_gh->gh_state == LM_ST_EXCLUSIVE)
538 return 1;
539 }
540
541 list_move_tail(&gh->gh_list, &gl->gl_holders);
542 gh->gh_error = 0;
543 set_bit(HIF_HOLDER, &gh->gh_iflags);
544
545 gfs2_holder_wake(gh);
546
547 return 0;
548}
549
550/**
551 * rq_demote - process a demote request in the queue
552 * @gh: the glock holder
553 *
554 * Returns: 1 if the queue is blocked
555 */
556
557static int rq_demote(struct gfs2_glock *gl)
558{
559 if (!list_empty(&gl->gl_holders))
560 return 1;
561
562 if (gl->gl_state == gl->gl_demote_state ||
563 gl->gl_state == LM_ST_UNLOCKED) {
564 gfs2_demote_wake(gl);
565 return 0;
566 }
567
568 set_bit(GLF_LOCK, &gl->gl_flags);
569 set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
570
571 if (gl->gl_demote_state == LM_ST_UNLOCKED ||
572 gl->gl_state != LM_ST_EXCLUSIVE) {
573 spin_unlock(&gl->gl_spin);
574 gfs2_glock_drop_th(gl);
575 } else {
576 spin_unlock(&gl->gl_spin);
577 gfs2_glock_xmote_th(gl, NULL);
578 }
579
580 spin_lock(&gl->gl_spin);
581 clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
582
583 return 0;
584}
585
586/**
587 * run_queue - process holder structures on a glock
588 * @gl: the glock
589 *
590 */
591static void run_queue(struct gfs2_glock *gl)
592{
593 struct gfs2_holder *gh;
594 int blocked = 1;
595
596 for (;;) {
597 if (test_bit(GLF_LOCK, &gl->gl_flags))
598 break;
599
600 if (!list_empty(&gl->gl_waiters1)) {
601 gh = list_entry(gl->gl_waiters1.next,
602 struct gfs2_holder, gh_list);
603 blocked = rq_mutex(gh);
604 } else if (test_bit(GLF_DEMOTE, &gl->gl_flags)) {
605 blocked = rq_demote(gl);
606 if (test_bit(GLF_WAITERS2, &gl->gl_flags) &&
607 !blocked) {
608 set_bit(GLF_DEMOTE, &gl->gl_flags);
609 gl->gl_demote_state = LM_ST_UNLOCKED;
610 }
611 clear_bit(GLF_WAITERS2, &gl->gl_flags);
612 } else if (!list_empty(&gl->gl_waiters3)) {
613 gh = list_entry(gl->gl_waiters3.next,
614 struct gfs2_holder, gh_list);
615 blocked = rq_promote(gh);
616 } else
617 break;
618
619 if (blocked)
620 break;
621 }
622}
623
624/**
625 * gfs2_glmutex_lock - acquire a local lock on a glock
626 * @gl: the glock
627 *
628 * Gives caller exclusive access to manipulate a glock structure.
629 */
630
631static void gfs2_glmutex_lock(struct gfs2_glock *gl)
632{
633 spin_lock(&gl->gl_spin);
634 if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
635 struct gfs2_holder gh;
636
637 gfs2_holder_init(gl, 0, 0, &gh);
638 set_bit(HIF_WAIT, &gh.gh_iflags);
639 list_add_tail(&gh.gh_list, &gl->gl_waiters1);
640 spin_unlock(&gl->gl_spin);
641 wait_on_holder(&gh);
642 gfs2_holder_uninit(&gh);
643 } else {
644 gl->gl_owner_pid = get_pid(task_pid(current));
645 gl->gl_ip = (unsigned long)__builtin_return_address(0);
646 spin_unlock(&gl->gl_spin);
647 }
648}
649
650/**
651 * gfs2_glmutex_trylock - try to acquire a local lock on a glock
652 * @gl: the glock
653 *
654 * Returns: 1 if the glock is acquired
655 */
656
657static int gfs2_glmutex_trylock(struct gfs2_glock *gl)
658{
659 int acquired = 1;
660
661 spin_lock(&gl->gl_spin);
662 if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
663 acquired = 0;
664 } else {
665 gl->gl_owner_pid = get_pid(task_pid(current));
666 gl->gl_ip = (unsigned long)__builtin_return_address(0);
667 }
668 spin_unlock(&gl->gl_spin);
669
670 return acquired;
671}
672
673/**
674 * gfs2_glmutex_unlock - release a local lock on a glock
675 * @gl: the glock
676 *
677 */
678
679static void gfs2_glmutex_unlock(struct gfs2_glock *gl)
680{
681 struct pid *pid;
682
683 spin_lock(&gl->gl_spin);
684 clear_bit(GLF_LOCK, &gl->gl_flags);
685 pid = gl->gl_owner_pid;
686 gl->gl_owner_pid = NULL;
687 gl->gl_ip = 0;
688 run_queue(gl);
689 spin_unlock(&gl->gl_spin);
690
691 put_pid(pid);
692}
693
694/**
695 * handle_callback - process a demote request 801 * handle_callback - process a demote request
696 * @gl: the glock 802 * @gl: the glock
697 * @state: the state the caller wants us to change to 803 * @state: the state the caller wants us to change to
@@ -705,398 +811,45 @@ static void handle_callback(struct gfs2_glock *gl, unsigned int state,
705{ 811{
706 int bit = delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE; 812 int bit = delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE;
707 813
708 spin_lock(&gl->gl_spin);
709 set_bit(bit, &gl->gl_flags); 814 set_bit(bit, &gl->gl_flags);
710 if (gl->gl_demote_state == LM_ST_EXCLUSIVE) { 815 if (gl->gl_demote_state == LM_ST_EXCLUSIVE) {
711 gl->gl_demote_state = state; 816 gl->gl_demote_state = state;
712 gl->gl_demote_time = jiffies; 817 gl->gl_demote_time = jiffies;
713 if (remote && gl->gl_ops->go_type == LM_TYPE_IOPEN && 818 if (remote && gl->gl_ops->go_type == LM_TYPE_IOPEN &&
714 gl->gl_object) { 819 gl->gl_object)
715 gfs2_glock_schedule_for_reclaim(gl); 820 gfs2_glock_schedule_for_reclaim(gl);
716 spin_unlock(&gl->gl_spin);
717 return;
718 }
719 } else if (gl->gl_demote_state != LM_ST_UNLOCKED && 821 } else if (gl->gl_demote_state != LM_ST_UNLOCKED &&
720 gl->gl_demote_state != state) { 822 gl->gl_demote_state != state) {
721 if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) 823 gl->gl_demote_state = LM_ST_UNLOCKED;
722 set_bit(GLF_WAITERS2, &gl->gl_flags);
723 else
724 gl->gl_demote_state = LM_ST_UNLOCKED;
725 } 824 }
726 spin_unlock(&gl->gl_spin);
727} 825}
728 826
729/** 827/**
730 * state_change - record that the glock is now in a different state 828 * gfs2_glock_wait - wait on a glock acquisition
731 * @gl: the glock
732 * @new_state the new state
733 *
734 */
735
736static void state_change(struct gfs2_glock *gl, unsigned int new_state)
737{
738 int held1, held2;
739
740 held1 = (gl->gl_state != LM_ST_UNLOCKED);
741 held2 = (new_state != LM_ST_UNLOCKED);
742
743 if (held1 != held2) {
744 if (held2)
745 gfs2_glock_hold(gl);
746 else
747 gfs2_glock_put(gl);
748 }
749
750 gl->gl_state = new_state;
751 gl->gl_tchange = jiffies;
752}
753
754/**
755 * drop_bh - Called after a lock module unlock completes
756 * @gl: the glock
757 * @ret: the return status
758 *
759 * Doesn't wake up the process waiting on the struct gfs2_holder (if any)
760 * Doesn't drop the reference on the glock the top half took out
761 *
762 */
763
764static void drop_bh(struct gfs2_glock *gl, unsigned int ret)
765{
766 struct gfs2_sbd *sdp = gl->gl_sbd;
767 struct gfs2_holder *gh = gl->gl_req_gh;
768
769 gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
770 gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
771 gfs2_assert_warn(sdp, !ret);
772
773 state_change(gl, LM_ST_UNLOCKED);
774
775 if (test_and_clear_bit(GLF_CONV_DEADLK, &gl->gl_flags)) {
776 spin_lock(&gl->gl_spin);
777 gh->gh_error = 0;
778 spin_unlock(&gl->gl_spin);
779 gfs2_glock_xmote_th(gl, gl->gl_req_gh);
780 gfs2_glock_put(gl);
781 return;
782 }
783
784 spin_lock(&gl->gl_spin);
785 gfs2_demote_wake(gl);
786 clear_bit(GLF_LOCK, &gl->gl_flags);
787 spin_unlock(&gl->gl_spin);
788 gfs2_glock_put(gl);
789}
790
791/**
792 * xmote_bh - Called after the lock module is done acquiring a lock
793 * @gl: The glock in question
794 * @ret: the int returned from the lock module
795 *
796 */
797
798static void xmote_bh(struct gfs2_glock *gl, unsigned int ret)
799{
800 struct gfs2_sbd *sdp = gl->gl_sbd;
801 const struct gfs2_glock_operations *glops = gl->gl_ops;
802 struct gfs2_holder *gh = gl->gl_req_gh;
803 int op_done = 1;
804
805 if (!gh && (ret & LM_OUT_ST_MASK) == LM_ST_UNLOCKED) {
806 drop_bh(gl, ret);
807 return;
808 }
809
810 gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
811 gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
812 gfs2_assert_warn(sdp, !(ret & LM_OUT_ASYNC));
813
814 state_change(gl, ret & LM_OUT_ST_MASK);
815
816 /* Deal with each possible exit condition */
817
818 if (!gh) {
819 gl->gl_stamp = jiffies;
820 if (ret & LM_OUT_CANCELED) {
821 op_done = 0;
822 } else {
823 spin_lock(&gl->gl_spin);
824 if (gl->gl_state != gl->gl_demote_state) {
825 spin_unlock(&gl->gl_spin);
826 gfs2_glock_drop_th(gl);
827 gfs2_glock_put(gl);
828 return;
829 }
830 gfs2_demote_wake(gl);
831 spin_unlock(&gl->gl_spin);
832 }
833 } else {
834 spin_lock(&gl->gl_spin);
835 if (ret & LM_OUT_CONV_DEADLK) {
836 gh->gh_error = 0;
837 set_bit(GLF_CONV_DEADLK, &gl->gl_flags);
838 spin_unlock(&gl->gl_spin);
839 gfs2_glock_drop_th(gl);
840 gfs2_glock_put(gl);
841 return;
842 }
843 list_del_init(&gh->gh_list);
844 gh->gh_error = -EIO;
845 if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
846 goto out;
847 gh->gh_error = GLR_CANCELED;
848 if (ret & LM_OUT_CANCELED)
849 goto out;
850 if (relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
851 list_add_tail(&gh->gh_list, &gl->gl_holders);
852 gh->gh_error = 0;
853 set_bit(HIF_HOLDER, &gh->gh_iflags);
854 set_bit(HIF_FIRST, &gh->gh_iflags);
855 op_done = 0;
856 goto out;
857 }
858 gh->gh_error = GLR_TRYFAILED;
859 if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))
860 goto out;
861 gh->gh_error = -EINVAL;
862 if (gfs2_assert_withdraw(sdp, 0) == -1)
863 fs_err(sdp, "ret = 0x%.8X\n", ret);
864out:
865 spin_unlock(&gl->gl_spin);
866 }
867
868 if (glops->go_xmote_bh)
869 glops->go_xmote_bh(gl);
870
871 if (op_done) {
872 spin_lock(&gl->gl_spin);
873 gl->gl_req_gh = NULL;
874 clear_bit(GLF_LOCK, &gl->gl_flags);
875 spin_unlock(&gl->gl_spin);
876 }
877
878 gfs2_glock_put(gl);
879
880 if (gh)
881 gfs2_holder_wake(gh);
882}
883
884static unsigned int gfs2_lm_lock(struct gfs2_sbd *sdp, void *lock,
885 unsigned int cur_state, unsigned int req_state,
886 unsigned int flags)
887{
888 int ret = 0;
889 if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
890 ret = sdp->sd_lockstruct.ls_ops->lm_lock(lock, cur_state,
891 req_state, flags);
892 return ret;
893}
894
895/**
896 * gfs2_glock_xmote_th - Call into the lock module to acquire or change a glock
897 * @gl: The glock in question
898 * @state: the requested state
899 * @flags: modifier flags to the lock call
900 *
901 */
902
903static void gfs2_glock_xmote_th(struct gfs2_glock *gl, struct gfs2_holder *gh)
904{
905 struct gfs2_sbd *sdp = gl->gl_sbd;
906 int flags = gh ? gh->gh_flags : 0;
907 unsigned state = gh ? gh->gh_state : gl->gl_demote_state;
908 const struct gfs2_glock_operations *glops = gl->gl_ops;
909 int lck_flags = flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB |
910 LM_FLAG_NOEXP | LM_FLAG_ANY |
911 LM_FLAG_PRIORITY);
912 unsigned int lck_ret;
913
914 if (glops->go_xmote_th)
915 glops->go_xmote_th(gl);
916 if (state == LM_ST_DEFERRED && glops->go_inval)
917 glops->go_inval(gl, DIO_METADATA);
918
919 gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
920 gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
921 gfs2_assert_warn(sdp, state != LM_ST_UNLOCKED);
922 gfs2_assert_warn(sdp, state != gl->gl_state);
923
924 gfs2_glock_hold(gl);
925
926 lck_ret = gfs2_lm_lock(sdp, gl->gl_lock, gl->gl_state, state, lck_flags);
927
928 if (gfs2_assert_withdraw(sdp, !(lck_ret & LM_OUT_ERROR)))
929 return;
930
931 if (lck_ret & LM_OUT_ASYNC)
932 gfs2_assert_warn(sdp, lck_ret == LM_OUT_ASYNC);
933 else
934 xmote_bh(gl, lck_ret);
935}
936
937static unsigned int gfs2_lm_unlock(struct gfs2_sbd *sdp, void *lock,
938 unsigned int cur_state)
939{
940 int ret = 0;
941 if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
942 ret = sdp->sd_lockstruct.ls_ops->lm_unlock(lock, cur_state);
943 return ret;
944}
945
946/**
947 * gfs2_glock_drop_th - call into the lock module to unlock a lock
948 * @gl: the glock
949 *
950 */
951
952static void gfs2_glock_drop_th(struct gfs2_glock *gl)
953{
954 struct gfs2_sbd *sdp = gl->gl_sbd;
955 const struct gfs2_glock_operations *glops = gl->gl_ops;
956 unsigned int ret;
957
958 if (glops->go_xmote_th)
959 glops->go_xmote_th(gl);
960 if (glops->go_inval)
961 glops->go_inval(gl, DIO_METADATA);
962
963 gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
964 gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
965 gfs2_assert_warn(sdp, gl->gl_state != LM_ST_UNLOCKED);
966
967 gfs2_glock_hold(gl);
968
969 ret = gfs2_lm_unlock(sdp, gl->gl_lock, gl->gl_state);
970
971 if (gfs2_assert_withdraw(sdp, !(ret & LM_OUT_ERROR)))
972 return;
973
974 if (!ret)
975 drop_bh(gl, ret);
976 else
977 gfs2_assert_warn(sdp, ret == LM_OUT_ASYNC);
978}
979
980/**
981 * do_cancels - cancel requests for locks stuck waiting on an expire flag
982 * @gh: the LM_FLAG_PRIORITY holder waiting to acquire the lock
983 *
984 * Don't cancel GL_NOCANCEL requests.
985 */
986
987static void do_cancels(struct gfs2_holder *gh)
988{
989 struct gfs2_glock *gl = gh->gh_gl;
990 struct gfs2_sbd *sdp = gl->gl_sbd;
991
992 spin_lock(&gl->gl_spin);
993
994 while (gl->gl_req_gh != gh &&
995 !test_bit(HIF_HOLDER, &gh->gh_iflags) &&
996 !list_empty(&gh->gh_list)) {
997 if (!(gl->gl_req_gh && (gl->gl_req_gh->gh_flags & GL_NOCANCEL))) {
998 spin_unlock(&gl->gl_spin);
999 if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
1000 sdp->sd_lockstruct.ls_ops->lm_cancel(gl->gl_lock);
1001 msleep(100);
1002 spin_lock(&gl->gl_spin);
1003 } else {
1004 spin_unlock(&gl->gl_spin);
1005 msleep(100);
1006 spin_lock(&gl->gl_spin);
1007 }
1008 }
1009
1010 spin_unlock(&gl->gl_spin);
1011}
1012
1013/**
1014 * glock_wait_internal - wait on a glock acquisition
1015 * @gh: the glock holder 829 * @gh: the glock holder
1016 * 830 *
1017 * Returns: 0 on success 831 * Returns: 0 on success
1018 */ 832 */
1019 833
1020static int glock_wait_internal(struct gfs2_holder *gh) 834int gfs2_glock_wait(struct gfs2_holder *gh)
1021{ 835{
1022 struct gfs2_glock *gl = gh->gh_gl;
1023 struct gfs2_sbd *sdp = gl->gl_sbd;
1024 const struct gfs2_glock_operations *glops = gl->gl_ops;
1025
1026 if (test_bit(HIF_ABORTED, &gh->gh_iflags))
1027 return -EIO;
1028
1029 if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
1030 spin_lock(&gl->gl_spin);
1031 if (gl->gl_req_gh != gh &&
1032 !test_bit(HIF_HOLDER, &gh->gh_iflags) &&
1033 !list_empty(&gh->gh_list)) {
1034 list_del_init(&gh->gh_list);
1035 gh->gh_error = GLR_TRYFAILED;
1036 run_queue(gl);
1037 spin_unlock(&gl->gl_spin);
1038 return gh->gh_error;
1039 }
1040 spin_unlock(&gl->gl_spin);
1041 }
1042
1043 if (gh->gh_flags & LM_FLAG_PRIORITY)
1044 do_cancels(gh);
1045
1046 wait_on_holder(gh); 836 wait_on_holder(gh);
1047 if (gh->gh_error)
1048 return gh->gh_error;
1049
1050 gfs2_assert_withdraw(sdp, test_bit(HIF_HOLDER, &gh->gh_iflags));
1051 gfs2_assert_withdraw(sdp, relaxed_state_ok(gl->gl_state, gh->gh_state,
1052 gh->gh_flags));
1053
1054 if (test_bit(HIF_FIRST, &gh->gh_iflags)) {
1055 gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
1056
1057 if (glops->go_lock) {
1058 gh->gh_error = glops->go_lock(gh);
1059 if (gh->gh_error) {
1060 spin_lock(&gl->gl_spin);
1061 list_del_init(&gh->gh_list);
1062 spin_unlock(&gl->gl_spin);
1063 }
1064 }
1065
1066 spin_lock(&gl->gl_spin);
1067 gl->gl_req_gh = NULL;
1068 clear_bit(GLF_LOCK, &gl->gl_flags);
1069 run_queue(gl);
1070 spin_unlock(&gl->gl_spin);
1071 }
1072
1073 return gh->gh_error; 837 return gh->gh_error;
1074} 838}
1075 839
1076static inline struct gfs2_holder * 840void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...)
1077find_holder_by_owner(struct list_head *head, struct pid *pid)
1078{
1079 struct gfs2_holder *gh;
1080
1081 list_for_each_entry(gh, head, gh_list) {
1082 if (gh->gh_owner_pid == pid)
1083 return gh;
1084 }
1085
1086 return NULL;
1087}
1088
1089static void print_dbg(struct glock_iter *gi, const char *fmt, ...)
1090{ 841{
1091 va_list args; 842 va_list args;
1092 843
1093 va_start(args, fmt); 844 va_start(args, fmt);
1094 if (gi) { 845 if (seq) {
846 struct gfs2_glock_iter *gi = seq->private;
1095 vsprintf(gi->string, fmt, args); 847 vsprintf(gi->string, fmt, args);
1096 seq_printf(gi->seq, gi->string); 848 seq_printf(seq, gi->string);
1097 } 849 } else {
1098 else 850 printk(KERN_ERR " ");
1099 vprintk(fmt, args); 851 vprintk(fmt, args);
852 }
1100 va_end(args); 853 va_end(args);
1101} 854}
1102 855
@@ -1104,50 +857,75 @@ static void print_dbg(struct glock_iter *gi, const char *fmt, ...)
1104 * add_to_queue - Add a holder to the wait queue (but look for recursion) 857 * add_to_queue - Add a holder to the wait queue (but look for recursion)
1105 * @gh: the holder structure to add 858 * @gh: the holder structure to add
1106 * 859 *
860 * Eventually we should move the recursive locking trap to a
861 * debugging option or something like that. This is the fast
862 * path and needs to have the minimum number of distractions.
863 *
1107 */ 864 */
1108 865
1109static void add_to_queue(struct gfs2_holder *gh) 866static inline void add_to_queue(struct gfs2_holder *gh)
1110{ 867{
1111 struct gfs2_glock *gl = gh->gh_gl; 868 struct gfs2_glock *gl = gh->gh_gl;
1112 struct gfs2_holder *existing; 869 struct gfs2_sbd *sdp = gl->gl_sbd;
870 struct list_head *insert_pt = NULL;
871 struct gfs2_holder *gh2;
872 int try_lock = 0;
1113 873
1114 BUG_ON(gh->gh_owner_pid == NULL); 874 BUG_ON(gh->gh_owner_pid == NULL);
1115 if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags)) 875 if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags))
1116 BUG(); 876 BUG();
1117 877
1118 if (!(gh->gh_flags & GL_FLOCK)) { 878 if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
1119 existing = find_holder_by_owner(&gl->gl_holders, 879 if (test_bit(GLF_LOCK, &gl->gl_flags))
1120 gh->gh_owner_pid); 880 try_lock = 1;
1121 if (existing) { 881 if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
1122 print_symbol(KERN_WARNING "original: %s\n", 882 goto fail;
1123 existing->gh_ip); 883 }
1124 printk(KERN_INFO "pid : %d\n", 884
1125 pid_nr(existing->gh_owner_pid)); 885 list_for_each_entry(gh2, &gl->gl_holders, gh_list) {
1126 printk(KERN_INFO "lock type : %d lock state : %d\n", 886 if (unlikely(gh2->gh_owner_pid == gh->gh_owner_pid &&
1127 existing->gh_gl->gl_name.ln_type, 887 (gh->gh_gl->gl_ops->go_type != LM_TYPE_FLOCK)))
1128 existing->gh_gl->gl_state); 888 goto trap_recursive;
1129 print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip); 889 if (try_lock &&
1130 printk(KERN_INFO "pid : %d\n", 890 !(gh2->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) &&
1131 pid_nr(gh->gh_owner_pid)); 891 !may_grant(gl, gh)) {
1132 printk(KERN_INFO "lock type : %d lock state : %d\n", 892fail:
1133 gl->gl_name.ln_type, gl->gl_state); 893 gh->gh_error = GLR_TRYFAILED;
1134 BUG(); 894 gfs2_holder_wake(gh);
1135 } 895 return;
1136
1137 existing = find_holder_by_owner(&gl->gl_waiters3,
1138 gh->gh_owner_pid);
1139 if (existing) {
1140 print_symbol(KERN_WARNING "original: %s\n",
1141 existing->gh_ip);
1142 print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip);
1143 BUG();
1144 } 896 }
897 if (test_bit(HIF_HOLDER, &gh2->gh_iflags))
898 continue;
899 if (unlikely((gh->gh_flags & LM_FLAG_PRIORITY) && !insert_pt))
900 insert_pt = &gh2->gh_list;
901 }
902 if (likely(insert_pt == NULL)) {
903 list_add_tail(&gh->gh_list, &gl->gl_holders);
904 if (unlikely(gh->gh_flags & LM_FLAG_PRIORITY))
905 goto do_cancel;
906 return;
1145 } 907 }
908 list_add_tail(&gh->gh_list, insert_pt);
909do_cancel:
910 gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
911 if (!(gh->gh_flags & LM_FLAG_PRIORITY)) {
912 spin_unlock(&gl->gl_spin);
913 sdp->sd_lockstruct.ls_ops->lm_cancel(gl->gl_lock);
914 spin_lock(&gl->gl_spin);
915 }
916 return;
1146 917
1147 if (gh->gh_flags & LM_FLAG_PRIORITY) 918trap_recursive:
1148 list_add(&gh->gh_list, &gl->gl_waiters3); 919 print_symbol(KERN_ERR "original: %s\n", gh2->gh_ip);
1149 else 920 printk(KERN_ERR "pid: %d\n", pid_nr(gh2->gh_owner_pid));
1150 list_add_tail(&gh->gh_list, &gl->gl_waiters3); 921 printk(KERN_ERR "lock type: %d req lock state : %d\n",
922 gh2->gh_gl->gl_name.ln_type, gh2->gh_state);
923 print_symbol(KERN_ERR "new: %s\n", gh->gh_ip);
924 printk(KERN_ERR "pid: %d\n", pid_nr(gh->gh_owner_pid));
925 printk(KERN_ERR "lock type: %d req lock state : %d\n",
926 gh->gh_gl->gl_name.ln_type, gh->gh_state);
927 __dump_glock(NULL, gl);
928 BUG();
1151} 929}
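
[Editor's note] The trap_recursive path replaces the old find_holder_by_owner() checks from the deleted code: queueing two holders on the same glock from the same pid (except for flock glocks) is caught while walking gl_holders. A contrived sequence that would trip it — gfs2_holder_init() and gfs2_glock_nq() are the real API, but the scenario itself is illustrative, not from the patch:

struct gfs2_holder gh1, gh2;

gfs2_holder_init(gl, LM_ST_SHARED, 0, &gh1);
gfs2_glock_nq(&gh1);            /* first holder for this task: fine */

gfs2_holder_init(gl, LM_ST_EXCLUSIVE, 0, &gh2);
gfs2_glock_nq(&gh2);            /* same pid, same (non-flock) glock:
                                   add_to_queue() dumps both holders
                                   via print_symbol() and BUG()s */
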
1152 930
1153/** 931/**
@@ -1165,24 +943,16 @@ int gfs2_glock_nq(struct gfs2_holder *gh)
1165 struct gfs2_sbd *sdp = gl->gl_sbd; 943 struct gfs2_sbd *sdp = gl->gl_sbd;
1166 int error = 0; 944 int error = 0;
1167 945
1168restart: 946 if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
1169 if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) {
1170 set_bit(HIF_ABORTED, &gh->gh_iflags);
1171 return -EIO; 947 return -EIO;
1172 }
1173 948
1174 spin_lock(&gl->gl_spin); 949 spin_lock(&gl->gl_spin);
1175 add_to_queue(gh); 950 add_to_queue(gh);
1176 run_queue(gl); 951 run_queue(gl, 1);
1177 spin_unlock(&gl->gl_spin); 952 spin_unlock(&gl->gl_spin);
1178 953
1179 if (!(gh->gh_flags & GL_ASYNC)) { 954 if (!(gh->gh_flags & GL_ASYNC))
1180 error = glock_wait_internal(gh); 955 error = gfs2_glock_wait(gh);
1181 if (error == GLR_CANCELED) {
1182 msleep(100);
1183 goto restart;
1184 }
1185 }
1186 956
1187 return error; 957 return error;
1188} 958}
@@ -1196,48 +966,7 @@ restart:
1196 966
1197int gfs2_glock_poll(struct gfs2_holder *gh) 967int gfs2_glock_poll(struct gfs2_holder *gh)
1198{ 968{
1199 struct gfs2_glock *gl = gh->gh_gl; 969 return test_bit(HIF_WAIT, &gh->gh_iflags) ? 0 : 1;
1200 int ready = 0;
1201
1202 spin_lock(&gl->gl_spin);
1203
1204 if (test_bit(HIF_HOLDER, &gh->gh_iflags))
1205 ready = 1;
1206 else if (list_empty(&gh->gh_list)) {
1207 if (gh->gh_error == GLR_CANCELED) {
1208 spin_unlock(&gl->gl_spin);
1209 msleep(100);
1210 if (gfs2_glock_nq(gh))
1211 return 1;
1212 return 0;
1213 } else
1214 ready = 1;
1215 }
1216
1217 spin_unlock(&gl->gl_spin);
1218
1219 return ready;
1220}
1221
1222/**
1223 * gfs2_glock_wait - wait for a lock acquisition that ended in a GLR_ASYNC
1224 * @gh: the holder structure
1225 *
1226 * Returns: 0, GLR_TRYFAILED, or errno on failure
1227 */
1228
1229int gfs2_glock_wait(struct gfs2_holder *gh)
1230{
1231 int error;
1232
1233 error = glock_wait_internal(gh);
1234 if (error == GLR_CANCELED) {
1235 msleep(100);
1236 gh->gh_flags &= ~GL_ASYNC;
1237 error = gfs2_glock_nq(gh);
1238 }
1239
1240 return error;
1241} 970}
1242 971
1243/** 972/**
@@ -1251,26 +980,30 @@ void gfs2_glock_dq(struct gfs2_holder *gh)
1251 struct gfs2_glock *gl = gh->gh_gl; 980 struct gfs2_glock *gl = gh->gh_gl;
1252 const struct gfs2_glock_operations *glops = gl->gl_ops; 981 const struct gfs2_glock_operations *glops = gl->gl_ops;
1253 unsigned delay = 0; 982 unsigned delay = 0;
983 int fast_path = 0;
1254 984
985 spin_lock(&gl->gl_spin);
1255 if (gh->gh_flags & GL_NOCACHE) 986 if (gh->gh_flags & GL_NOCACHE)
1256 handle_callback(gl, LM_ST_UNLOCKED, 0, 0); 987 handle_callback(gl, LM_ST_UNLOCKED, 0, 0);
1257 988
1258 gfs2_glmutex_lock(gl);
1259
1260 spin_lock(&gl->gl_spin);
1261 list_del_init(&gh->gh_list); 989 list_del_init(&gh->gh_list);
1262 990 if (find_first_holder(gl) == NULL) {
1263 if (list_empty(&gl->gl_holders)) {
1264 if (glops->go_unlock) { 991 if (glops->go_unlock) {
992 GLOCK_BUG_ON(gl, test_and_set_bit(GLF_LOCK, &gl->gl_flags));
1265 spin_unlock(&gl->gl_spin); 993 spin_unlock(&gl->gl_spin);
1266 glops->go_unlock(gh); 994 glops->go_unlock(gh);
1267 spin_lock(&gl->gl_spin); 995 spin_lock(&gl->gl_spin);
996 clear_bit(GLF_LOCK, &gl->gl_flags);
1268 } 997 }
1269 gl->gl_stamp = jiffies; 998 gl->gl_stamp = jiffies;
999 if (list_empty(&gl->gl_holders) &&
1000 !test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
1001 !test_bit(GLF_DEMOTE, &gl->gl_flags))
1002 fast_path = 1;
1270 } 1003 }
1271
1272 clear_bit(GLF_LOCK, &gl->gl_flags);
1273 spin_unlock(&gl->gl_spin); 1004 spin_unlock(&gl->gl_spin);
1005 if (likely(fast_path))
1006 return;
1274 1007
1275 gfs2_glock_hold(gl); 1008 gfs2_glock_hold(gl);
1276 if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) && 1009 if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
@@ -1469,20 +1202,14 @@ int gfs2_lvb_hold(struct gfs2_glock *gl)
1469{ 1202{
1470 int error; 1203 int error;
1471 1204
1472 gfs2_glmutex_lock(gl);
1473
1474 if (!atomic_read(&gl->gl_lvb_count)) { 1205 if (!atomic_read(&gl->gl_lvb_count)) {
1475 error = gfs2_lm_hold_lvb(gl->gl_sbd, gl->gl_lock, &gl->gl_lvb); 1206 error = gfs2_lm_hold_lvb(gl->gl_sbd, gl->gl_lock, &gl->gl_lvb);
1476 if (error) { 1207 if (error)
1477 gfs2_glmutex_unlock(gl);
1478 return error; 1208 return error;
1479 }
1480 gfs2_glock_hold(gl); 1209 gfs2_glock_hold(gl);
1481 } 1210 }
1482 atomic_inc(&gl->gl_lvb_count); 1211 atomic_inc(&gl->gl_lvb_count);
1483 1212
1484 gfs2_glmutex_unlock(gl);
1485
1486 return 0; 1213 return 0;
1487} 1214}
1488 1215
@@ -1497,8 +1224,6 @@ void gfs2_lvb_unhold(struct gfs2_glock *gl)
1497 struct gfs2_sbd *sdp = gl->gl_sbd; 1224 struct gfs2_sbd *sdp = gl->gl_sbd;
1498 1225
1499 gfs2_glock_hold(gl); 1226 gfs2_glock_hold(gl);
1500 gfs2_glmutex_lock(gl);
1501
1502 gfs2_assert(gl->gl_sbd, atomic_read(&gl->gl_lvb_count) > 0); 1227 gfs2_assert(gl->gl_sbd, atomic_read(&gl->gl_lvb_count) > 0);
1503 if (atomic_dec_and_test(&gl->gl_lvb_count)) { 1228 if (atomic_dec_and_test(&gl->gl_lvb_count)) {
1504 if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) 1229 if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
@@ -1506,8 +1231,6 @@ void gfs2_lvb_unhold(struct gfs2_glock *gl)
1506 gl->gl_lvb = NULL; 1231 gl->gl_lvb = NULL;
1507 gfs2_glock_put(gl); 1232 gfs2_glock_put(gl);
1508 } 1233 }
1509
1510 gfs2_glmutex_unlock(gl);
1511 gfs2_glock_put(gl); 1234 gfs2_glock_put(gl);
1512} 1235}
1513 1236
@@ -1527,7 +1250,9 @@ static void blocking_cb(struct gfs2_sbd *sdp, struct lm_lockname *name,
1527 if (time_before(now, holdtime)) 1250 if (time_before(now, holdtime))
1528 delay = holdtime - now; 1251 delay = holdtime - now;
1529 1252
1253 spin_lock(&gl->gl_spin);
1530 handle_callback(gl, state, 1, delay); 1254 handle_callback(gl, state, 1, delay);
1255 spin_unlock(&gl->gl_spin);
1531 if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0) 1256 if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
1532 gfs2_glock_put(gl); 1257 gfs2_glock_put(gl);
1533} 1258}
@@ -1568,7 +1293,8 @@ void gfs2_glock_cb(void *cb_data, unsigned int type, void *data)
1568 gl = gfs2_glock_find(sdp, &async->lc_name); 1293 gl = gfs2_glock_find(sdp, &async->lc_name);
1569 if (gfs2_assert_warn(sdp, gl)) 1294 if (gfs2_assert_warn(sdp, gl))
1570 return; 1295 return;
1571 xmote_bh(gl, async->lc_ret); 1296 gl->gl_reply = async->lc_ret;
1297 set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
1572 if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0) 1298 if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
1573 gfs2_glock_put(gl); 1299 gfs2_glock_put(gl);
1574 up_read(&gfs2_umount_flush_sem); 1300 up_read(&gfs2_umount_flush_sem);
@@ -1646,6 +1372,7 @@ void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
1646void gfs2_reclaim_glock(struct gfs2_sbd *sdp) 1372void gfs2_reclaim_glock(struct gfs2_sbd *sdp)
1647{ 1373{
1648 struct gfs2_glock *gl; 1374 struct gfs2_glock *gl;
1375 int done_callback = 0;
1649 1376
1650 spin_lock(&sdp->sd_reclaim_lock); 1377 spin_lock(&sdp->sd_reclaim_lock);
1651 if (list_empty(&sdp->sd_reclaim_list)) { 1378 if (list_empty(&sdp->sd_reclaim_list)) {
@@ -1660,14 +1387,16 @@ void gfs2_reclaim_glock(struct gfs2_sbd *sdp)
1660 atomic_dec(&sdp->sd_reclaim_count); 1387 atomic_dec(&sdp->sd_reclaim_count);
1661 atomic_inc(&sdp->sd_reclaimed); 1388 atomic_inc(&sdp->sd_reclaimed);
1662 1389
1663 if (gfs2_glmutex_trylock(gl)) { 1390 spin_lock(&gl->gl_spin);
1664 if (list_empty(&gl->gl_holders) && 1391 if (find_first_holder(gl) == NULL &&
1665 gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl)) 1392 gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl)) {
1666 handle_callback(gl, LM_ST_UNLOCKED, 0, 0); 1393 handle_callback(gl, LM_ST_UNLOCKED, 0, 0);
1667 gfs2_glmutex_unlock(gl); 1394 done_callback = 1;
1668 } 1395 }
1669 1396 spin_unlock(&gl->gl_spin);
1670 gfs2_glock_put(gl); 1397 if (!done_callback ||
1398 queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
1399 gfs2_glock_put(gl);
1671} 1400}
1672 1401
1673/** 1402/**
@@ -1724,18 +1453,14 @@ static void scan_glock(struct gfs2_glock *gl)
1724{ 1453{
1725 if (gl->gl_ops == &gfs2_inode_glops && gl->gl_object) 1454 if (gl->gl_ops == &gfs2_inode_glops && gl->gl_object)
1726 return; 1455 return;
1456 if (test_bit(GLF_LOCK, &gl->gl_flags))
1457 return;
1727 1458
1728 if (gfs2_glmutex_trylock(gl)) { 1459 spin_lock(&gl->gl_spin);
1729 if (list_empty(&gl->gl_holders) && 1460 if (find_first_holder(gl) == NULL &&
1730 gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl)) 1461 gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl))
1731 goto out_schedule; 1462 gfs2_glock_schedule_for_reclaim(gl);
1732 gfs2_glmutex_unlock(gl); 1463 spin_unlock(&gl->gl_spin);
1733 }
1734 return;
1735
1736out_schedule:
1737 gfs2_glmutex_unlock(gl);
1738 gfs2_glock_schedule_for_reclaim(gl);
1739} 1464}
1740 1465
1741/** 1466/**
@@ -1760,12 +1485,13 @@ static void clear_glock(struct gfs2_glock *gl)
1760 spin_unlock(&sdp->sd_reclaim_lock); 1485 spin_unlock(&sdp->sd_reclaim_lock);
1761 } 1486 }
1762 1487
1763 if (gfs2_glmutex_trylock(gl)) { 1488 spin_lock(&gl->gl_spin);
1764 if (list_empty(&gl->gl_holders) && 1489 if (find_first_holder(gl) == NULL && gl->gl_state != LM_ST_UNLOCKED)
1765 gl->gl_state != LM_ST_UNLOCKED) 1490 handle_callback(gl, LM_ST_UNLOCKED, 0, 0);
1766 handle_callback(gl, LM_ST_UNLOCKED, 0, 0); 1491 spin_unlock(&gl->gl_spin);
1767 gfs2_glmutex_unlock(gl); 1492 gfs2_glock_hold(gl);
1768 } 1493 if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
1494 gfs2_glock_put(gl);
1769} 1495}
1770 1496
1771/** 1497/**
@@ -1810,180 +1536,164 @@ void gfs2_gl_hash_clear(struct gfs2_sbd *sdp, int wait)
1810 } 1536 }
1811} 1537}
1812 1538
1813/* 1539static const char *state2str(unsigned state)
1814 * Diagnostic routines to help debug distributed deadlock
1815 */
1816
1817static void gfs2_print_symbol(struct glock_iter *gi, const char *fmt,
1818 unsigned long address)
1819{ 1540{
1820 char buffer[KSYM_SYMBOL_LEN]; 1541 switch(state) {
1821 1542 case LM_ST_UNLOCKED:
1822 sprint_symbol(buffer, address); 1543 return "UN";
1823 print_dbg(gi, fmt, buffer); 1544 case LM_ST_SHARED:
1545 return "SH";
1546 case LM_ST_DEFERRED:
1547 return "DF";
1548 case LM_ST_EXCLUSIVE:
1549 return "EX";
1550 }
1551 return "??";
1552}
1553
1554static const char *hflags2str(char *buf, unsigned flags, unsigned long iflags)
1555{
1556 char *p = buf;
1557 if (flags & LM_FLAG_TRY)
1558 *p++ = 't';
1559 if (flags & LM_FLAG_TRY_1CB)
1560 *p++ = 'T';
1561 if (flags & LM_FLAG_NOEXP)
1562 *p++ = 'e';
1563 if (flags & LM_FLAG_ANY)
1564 *p++ = 'a';
1565 if (flags & LM_FLAG_PRIORITY)
1566 *p++ = 'p';
1567 if (flags & GL_ASYNC)
1568 *p++ = 'a';
1569 if (flags & GL_EXACT)
1570 *p++ = 'E';
1571 if (flags & GL_ATIME)
1572 *p++ = 'a';
1573 if (flags & GL_NOCACHE)
1574 *p++ = 'c';
1575 if (test_bit(HIF_HOLDER, &iflags))
1576 *p++ = 'H';
1577 if (test_bit(HIF_WAIT, &iflags))
1578 *p++ = 'W';
1579 if (test_bit(HIF_FIRST, &iflags))
1580 *p++ = 'F';
1581 *p = 0;
1582 return buf;
1824} 1583}
1825 1584
1826/** 1585/**
1827 * dump_holder - print information about a glock holder 1586 * dump_holder - print information about a glock holder
1828 * @str: a string naming the type of holder 1587 * @seq: the seq_file struct
1829 * @gh: the glock holder 1588 * @gh: the glock holder
1830 * 1589 *
1831 * Returns: 0 on success, -ENOBUFS when we run out of space 1590 * Returns: 0 on success, -ENOBUFS when we run out of space
1832 */ 1591 */
1833 1592
1834static int dump_holder(struct glock_iter *gi, char *str, 1593static int dump_holder(struct seq_file *seq, const struct gfs2_holder *gh)
1835 struct gfs2_holder *gh)
1836{ 1594{
1837 unsigned int x; 1595 struct task_struct *gh_owner = NULL;
1838 struct task_struct *gh_owner; 1596 char buffer[KSYM_SYMBOL_LEN];
1597 char flags_buf[32];
1839 1598
1840 print_dbg(gi, " %s\n", str); 1599 sprint_symbol(buffer, gh->gh_ip);
1841 if (gh->gh_owner_pid) { 1600 if (gh->gh_owner_pid)
1842 print_dbg(gi, " owner = %ld ",
1843 (long)pid_nr(gh->gh_owner_pid));
1844 gh_owner = pid_task(gh->gh_owner_pid, PIDTYPE_PID); 1601 gh_owner = pid_task(gh->gh_owner_pid, PIDTYPE_PID);
1845 if (gh_owner) 1602 gfs2_print_dbg(seq, " H: s:%s f:%s e:%d p:%ld [%s] %s\n",
1846 print_dbg(gi, "(%s)\n", gh_owner->comm); 1603 state2str(gh->gh_state),
1847 else 1604 hflags2str(flags_buf, gh->gh_flags, gh->gh_iflags),
1848 print_dbg(gi, "(ended)\n"); 1605 gh->gh_error,
1849 } else 1606 gh->gh_owner_pid ? (long)pid_nr(gh->gh_owner_pid) : -1,
1850 print_dbg(gi, " owner = -1\n"); 1607 gh_owner ? gh_owner->comm : "(ended)", buffer);
1851 print_dbg(gi, " gh_state = %u\n", gh->gh_state);
1852 print_dbg(gi, " gh_flags =");
1853 for (x = 0; x < 32; x++)
1854 if (gh->gh_flags & (1 << x))
1855 print_dbg(gi, " %u", x);
1856 print_dbg(gi, " \n");
1857 print_dbg(gi, " error = %d\n", gh->gh_error);
1858 print_dbg(gi, " gh_iflags =");
1859 for (x = 0; x < 32; x++)
1860 if (test_bit(x, &gh->gh_iflags))
1861 print_dbg(gi, " %u", x);
1862 print_dbg(gi, " \n");
1863 gfs2_print_symbol(gi, " initialized at: %s\n", gh->gh_ip);
1864
1865 return 0; 1608 return 0;
1866} 1609}
 
-/**
- * dump_inode - print information about an inode
- * @ip: the inode
- *
- * Returns: 0 on success, -ENOBUFS when we run out of space
- */
-
-static int dump_inode(struct glock_iter *gi, struct gfs2_inode *ip)
-{
-	unsigned int x;
-
-	print_dbg(gi, "  Inode:\n");
-	print_dbg(gi, "    num = %llu/%llu\n",
-		  (unsigned long long)ip->i_no_formal_ino,
-		  (unsigned long long)ip->i_no_addr);
-	print_dbg(gi, "    type = %u\n", IF2DT(ip->i_inode.i_mode));
-	print_dbg(gi, "    i_flags =");
-	for (x = 0; x < 32; x++)
-		if (test_bit(x, &ip->i_flags))
-			print_dbg(gi, " %u", x);
-	print_dbg(gi, " \n");
-	return 0;
+static const char *gflags2str(char *buf, const unsigned long *gflags)
+{
+	char *p = buf;
+	if (test_bit(GLF_LOCK, gflags))
+		*p++ = 'l';
+	if (test_bit(GLF_STICKY, gflags))
+		*p++ = 's';
+	if (test_bit(GLF_DEMOTE, gflags))
+		*p++ = 'D';
+	if (test_bit(GLF_PENDING_DEMOTE, gflags))
+		*p++ = 'd';
+	if (test_bit(GLF_DEMOTE_IN_PROGRESS, gflags))
+		*p++ = 'p';
+	if (test_bit(GLF_DIRTY, gflags))
+		*p++ = 'y';
+	if (test_bit(GLF_LFLUSH, gflags))
+		*p++ = 'f';
+	if (test_bit(GLF_INVALIDATE_IN_PROGRESS, gflags))
+		*p++ = 'i';
+	if (test_bit(GLF_REPLY_PENDING, gflags))
+		*p++ = 'r';
+	*p = 0;
+	return buf;
 }
 
 /**
- * dump_glock - print information about a glock
+ * __dump_glock - print information about a glock
+ * @seq: The seq_file struct
  * @gl: the glock
- * @count: where we are in the buffer
+ *
+ * The file format is as follows:
+ * One line per object, capital letters are used to indicate objects
+ * G = glock, I = Inode, R = rgrp, H = holder. Glocks are not indented,
+ * other objects are indented by a single space and follow the glock to
+ * which they are related. Fields are indicated by lower case letters
+ * followed by a colon and the field value, except for strings which are in
+ * [] so that it's possible to see if they are composed of spaces for
+ * example. The fields are: n = number (id of the object), f = flags,
+ * t = type, s = state, r = refcount, e = error, p = pid.
  *
  * Returns: 0 on success, -ENOBUFS when we run out of space
  */
 
-static int dump_glock(struct glock_iter *gi, struct gfs2_glock *gl)
+static int __dump_glock(struct seq_file *seq, const struct gfs2_glock *gl)
 {
-	struct gfs2_holder *gh;
-	unsigned int x;
-	int error = -ENOBUFS;
-	struct task_struct *gl_owner;
+	const struct gfs2_glock_operations *glops = gl->gl_ops;
+	unsigned long long dtime;
+	const struct gfs2_holder *gh;
+	char gflags_buf[32];
+	int error = 0;
 
-	spin_lock(&gl->gl_spin);
+	dtime = jiffies - gl->gl_demote_time;
+	dtime *= 1000000/HZ; /* demote time in uSec */
+	if (!test_bit(GLF_DEMOTE, &gl->gl_flags))
+		dtime = 0;
+	gfs2_print_dbg(seq, "G: s:%s n:%u/%llu f:%s t:%s d:%s/%llu l:%d a:%d r:%d\n",
+		       state2str(gl->gl_state),
+		       gl->gl_name.ln_type,
+		       (unsigned long long)gl->gl_name.ln_number,
+		       gflags2str(gflags_buf, &gl->gl_flags),
+		       state2str(gl->gl_target),
+		       state2str(gl->gl_demote_state), dtime,
+		       atomic_read(&gl->gl_lvb_count),
+		       atomic_read(&gl->gl_ail_count),
+		       atomic_read(&gl->gl_ref));
 
-	print_dbg(gi, "Glock 0x%p (%u, 0x%llx)\n", gl, gl->gl_name.ln_type,
-		  (unsigned long long)gl->gl_name.ln_number);
-	print_dbg(gi, "  gl_flags =");
-	for (x = 0; x < 32; x++) {
-		if (test_bit(x, &gl->gl_flags))
-			print_dbg(gi, " %u", x);
-	}
-	if (!test_bit(GLF_LOCK, &gl->gl_flags))
-		print_dbg(gi, " (unlocked)");
-	print_dbg(gi, " \n");
-	print_dbg(gi, "  gl_ref = %d\n", atomic_read(&gl->gl_ref));
-	print_dbg(gi, "  gl_state = %u\n", gl->gl_state);
-	if (gl->gl_owner_pid) {
-		gl_owner = pid_task(gl->gl_owner_pid, PIDTYPE_PID);
-		if (gl_owner)
-			print_dbg(gi, "  gl_owner = pid %d (%s)\n",
-				  pid_nr(gl->gl_owner_pid), gl_owner->comm);
-		else
-			print_dbg(gi, "  gl_owner = %d (ended)\n",
-				  pid_nr(gl->gl_owner_pid));
-	} else
-		print_dbg(gi, "  gl_owner = -1\n");
-	print_dbg(gi, "  gl_ip = %lu\n", gl->gl_ip);
-	print_dbg(gi, "  req_gh = %s\n", (gl->gl_req_gh) ? "yes" : "no");
-	print_dbg(gi, "  lvb_count = %d\n", atomic_read(&gl->gl_lvb_count));
-	print_dbg(gi, "  object = %s\n", (gl->gl_object) ? "yes" : "no");
-	print_dbg(gi, "  reclaim = %s\n",
-		  (list_empty(&gl->gl_reclaim)) ? "no" : "yes");
-	if (gl->gl_aspace)
-		print_dbg(gi, "  aspace = 0x%p nrpages = %lu\n", gl->gl_aspace,
-			  gl->gl_aspace->i_mapping->nrpages);
-	else
-		print_dbg(gi, "  aspace = no\n");
-	print_dbg(gi, "  ail = %d\n", atomic_read(&gl->gl_ail_count));
-	if (gl->gl_req_gh) {
-		error = dump_holder(gi, "Request", gl->gl_req_gh);
-		if (error)
-			goto out;
-	}
 	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
-		error = dump_holder(gi, "Holder", gh);
-		if (error)
-			goto out;
-	}
-	list_for_each_entry(gh, &gl->gl_waiters1, gh_list) {
-		error = dump_holder(gi, "Waiter1", gh);
-		if (error)
-			goto out;
-	}
-	list_for_each_entry(gh, &gl->gl_waiters3, gh_list) {
-		error = dump_holder(gi, "Waiter3", gh);
+		error = dump_holder(seq, gh);
 		if (error)
 			goto out;
 	}
-	if (test_bit(GLF_DEMOTE, &gl->gl_flags)) {
-		print_dbg(gi, "  Demotion req to state %u (%llu uS ago)\n",
-			  gl->gl_demote_state, (unsigned long long)
-			  (jiffies - gl->gl_demote_time)*(1000000/HZ));
-	}
-	if (gl->gl_ops == &gfs2_inode_glops && gl->gl_object) {
-		if (!test_bit(GLF_LOCK, &gl->gl_flags) &&
-		    list_empty(&gl->gl_holders)) {
-			error = dump_inode(gi, gl->gl_object);
-			if (error)
-				goto out;
-		} else {
-			error = -ENOBUFS;
-			print_dbg(gi, "  Inode: busy\n");
-		}
-	}
-
-	error = 0;
-
+	if (gl->gl_state != LM_ST_UNLOCKED && glops->go_dump)
+		error = glops->go_dump(seq, gl);
 out:
-	spin_unlock(&gl->gl_spin);
 	return error;
 }
 
+static int dump_glock(struct seq_file *seq, struct gfs2_glock *gl)
+{
+	int ret;
+	spin_lock(&gl->gl_spin);
+	ret = __dump_glock(seq, gl);
+	spin_unlock(&gl->gl_spin);
+	return ret;
+}
+
 /**
  * gfs2_dump_lockstate - print out the current lockstate
  * @sdp: the filesystem
@@ -2086,7 +1796,7 @@ void gfs2_glock_exit(void)
 module_param(scand_secs, uint, S_IRUGO|S_IWUSR);
 MODULE_PARM_DESC(scand_secs, "The number of seconds between scand runs");
 
-static int gfs2_glock_iter_next(struct glock_iter *gi)
+static int gfs2_glock_iter_next(struct gfs2_glock_iter *gi)
 {
 	struct gfs2_glock *gl;
 
@@ -2104,7 +1814,7 @@ restart:
 	gfs2_glock_put(gl);
 	if (gl && gi->gl == NULL)
 		gi->hash++;
-	while(gi->gl == NULL) {
+	while (gi->gl == NULL) {
 		if (gi->hash >= GFS2_GL_HASH_SIZE)
 			return 1;
 		read_lock(gl_lock_addr(gi->hash));
@@ -2122,58 +1832,34 @@ restart:
 	return 0;
 }
 
-static void gfs2_glock_iter_free(struct glock_iter *gi)
+static void gfs2_glock_iter_free(struct gfs2_glock_iter *gi)
 {
 	if (gi->gl)
 		gfs2_glock_put(gi->gl);
-	kfree(gi);
-}
-
-static struct glock_iter *gfs2_glock_iter_init(struct gfs2_sbd *sdp)
-{
-	struct glock_iter *gi;
-
-	gi = kmalloc(sizeof (*gi), GFP_KERNEL);
-	if (!gi)
-		return NULL;
-
-	gi->sdp = sdp;
-	gi->hash = 0;
-	gi->seq = NULL;
 	gi->gl = NULL;
-	memset(gi->string, 0, sizeof(gi->string));
-
-	if (gfs2_glock_iter_next(gi)) {
-		gfs2_glock_iter_free(gi);
-		return NULL;
-	}
-
-	return gi;
 }
 
-static void *gfs2_glock_seq_start(struct seq_file *file, loff_t *pos)
+static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos)
 {
-	struct glock_iter *gi;
+	struct gfs2_glock_iter *gi = seq->private;
 	loff_t n = *pos;
 
-	gi = gfs2_glock_iter_init(file->private);
-	if (!gi)
-		return NULL;
+	gi->hash = 0;
 
-	while(n--) {
+	do {
 		if (gfs2_glock_iter_next(gi)) {
 			gfs2_glock_iter_free(gi);
 			return NULL;
 		}
-	}
+	} while (n--);
 
-	return gi;
+	return gi->gl;
 }
 
-static void *gfs2_glock_seq_next(struct seq_file *file, void *iter_ptr,
+static void *gfs2_glock_seq_next(struct seq_file *seq, void *iter_ptr,
 				 loff_t *pos)
 {
-	struct glock_iter *gi = iter_ptr;
+	struct gfs2_glock_iter *gi = seq->private;
 
 	(*pos)++;
 
@@ -2182,24 +1868,18 @@ static void *gfs2_glock_seq_next(struct seq_file *file, void *iter_ptr,
 		return NULL;
 	}
 
-	return gi;
+	return gi->gl;
 }
 
-static void gfs2_glock_seq_stop(struct seq_file *file, void *iter_ptr)
+static void gfs2_glock_seq_stop(struct seq_file *seq, void *iter_ptr)
 {
-	struct glock_iter *gi = iter_ptr;
-	if (gi)
-		gfs2_glock_iter_free(gi);
+	struct gfs2_glock_iter *gi = seq->private;
+	gfs2_glock_iter_free(gi);
 }
 
-static int gfs2_glock_seq_show(struct seq_file *file, void *iter_ptr)
+static int gfs2_glock_seq_show(struct seq_file *seq, void *iter_ptr)
 {
-	struct glock_iter *gi = iter_ptr;
-
-	gi->seq = file;
-	dump_glock(gi, gi->gl);
-
-	return 0;
+	return dump_glock(seq, iter_ptr);
 }
 
 static const struct seq_operations gfs2_glock_seq_ops = {
@@ -2211,17 +1891,14 @@ static const struct seq_operations gfs2_glock_seq_ops = {
 
 static int gfs2_debugfs_open(struct inode *inode, struct file *file)
 {
-	struct seq_file *seq;
-	int ret;
-
-	ret = seq_open(file, &gfs2_glock_seq_ops);
-	if (ret)
-		return ret;
-
-	seq = file->private_data;
-	seq->private = inode->i_private;
-
-	return 0;
+	int ret = seq_open_private(file, &gfs2_glock_seq_ops,
+				   sizeof(struct gfs2_glock_iter));
+	if (ret == 0) {
+		struct seq_file *seq = file->private_data;
+		struct gfs2_glock_iter *gi = seq->private;
+		gi->sdp = inode->i_private;
+	}
+	return ret;
 }
 
 static const struct file_operations gfs2_debug_fops = {
@@ -2229,7 +1906,7 @@ static const struct file_operations gfs2_debug_fops = {
 	.open    = gfs2_debugfs_open,
 	.read    = seq_read,
 	.llseek  = seq_lseek,
-	.release = seq_release
+	.release = seq_release_private,
 };
 
 int gfs2_create_debugfs_file(struct gfs2_sbd *sdp)
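
[Annotation] The __dump_glock() comment above defines the new one-line-per-object debugfs format, and gflags2str() fixes one letter per GLF_* bit. As an illustration only (this decoder is not part of the patch), a user-space sketch that maps the f: field of a "G:" line back to flag names, using exactly the letter assignments visible in the hunk:

/* Illustrative user-space decoder, not part of the patch: maps the
 * characters of the f: field in a "G:" line back to the GLF_* names
 * assigned by gflags2str() above. */
#include <stdio.h>

static const char *glf_name(char c)
{
	switch (c) {
	case 'l': return "GLF_LOCK";
	case 's': return "GLF_STICKY";
	case 'D': return "GLF_DEMOTE";
	case 'd': return "GLF_PENDING_DEMOTE";
	case 'p': return "GLF_DEMOTE_IN_PROGRESS";
	case 'y': return "GLF_DIRTY";
	case 'f': return "GLF_LFLUSH";
	case 'i': return "GLF_INVALIDATE_IN_PROGRESS";
	case 'r': return "GLF_REPLY_PENDING";
	default:  return "(unknown)";
	}
}

int main(void)
{
	const char *flags = "Dy";	/* hypothetical f: field from "G: ... f:Dy ..." */
	for (; *flags; flags++)
		printf("%c -> %s\n", *flags, glf_name(*flags));
	return 0;
}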
diff --git a/fs/gfs2/glock.h b/fs/gfs2/glock.h
index cdad3e6f8150..7389f8ef0a31 100644
--- a/fs/gfs2/glock.h
+++ b/fs/gfs2/glock.h
@@ -26,11 +26,8 @@
 #define GL_SKIP			0x00000100
 #define GL_ATIME		0x00000200
 #define GL_NOCACHE		0x00000400
-#define GL_FLOCK		0x00000800
-#define GL_NOCANCEL		0x00001000
 
 #define GLR_TRYFAILED		13
-#define GLR_CANCELED		14
 
 static inline struct gfs2_holder *gfs2_glock_is_locked_by_me(struct gfs2_glock *gl)
 {
@@ -41,6 +38,8 @@ static inline struct gfs2_holder *gfs2_glock_is_locked_by_me(struct gfs2_glock *
 	spin_lock(&gl->gl_spin);
 	pid = task_pid(current);
 	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
+		if (!test_bit(HIF_HOLDER, &gh->gh_iflags))
+			break;
 		if (gh->gh_owner_pid == pid)
 			goto out;
 	}
@@ -70,7 +69,7 @@ static inline int gfs2_glock_is_blocking(struct gfs2_glock *gl)
 {
 	int ret;
 	spin_lock(&gl->gl_spin);
-	ret = test_bit(GLF_DEMOTE, &gl->gl_flags) || !list_empty(&gl->gl_waiters3);
+	ret = test_bit(GLF_DEMOTE, &gl->gl_flags);
 	spin_unlock(&gl->gl_spin);
 	return ret;
 }
@@ -98,6 +97,7 @@ int gfs2_glock_nq_num(struct gfs2_sbd *sdp,
 int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs);
 void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs);
 void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs);
+void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...);
 
 /**
  * gfs2_glock_nq_init - initialize a holder and enqueue it on a glock
@@ -130,7 +130,6 @@ int gfs2_lvb_hold(struct gfs2_glock *gl);
 void gfs2_lvb_unhold(struct gfs2_glock *gl);
 
 void gfs2_glock_cb(void *cb_data, unsigned int type, void *data);
-
 void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl);
 void gfs2_reclaim_glock(struct gfs2_sbd *sdp);
 void gfs2_gl_hash_clear(struct gfs2_sbd *sdp, int wait);
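
[Annotation] The new HIF_HOLDER test in gfs2_glock_is_locked_by_me() appears to rely on an ordering invariant: holders that have been granted the lock sit at the front of gl_holders, so the walk can stop at the first entry that is still waiting. A simplified, runnable user-space model of that early-exit scan (the array and names here are hypothetical stand-ins for the kernel list):

/* Sketch, under the assumption stated above: granted entries precede
 * waiters, so scanning stops at the first non-holder. */
#include <stdbool.h>
#include <stdio.h>

struct holder { bool granted; int pid; };

static int find_my_holder(const struct holder *h, int n, int mypid)
{
	for (int i = 0; i < n; i++) {
		if (!h[i].granted)	/* first waiter: no granted holder can follow */
			break;
		if (h[i].pid == mypid)
			return i;
	}
	return -1;
}

int main(void)
{
	struct holder list[] = { { true, 10 }, { true, 42 }, { false, 7 } };
	printf("%d\n", find_my_holder(list, 3, 42));	/* prints 1 */
	return 0;
}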
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index 07d84d16cda4..c6c318c2a0f6 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -13,6 +13,7 @@
 #include <linux/buffer_head.h>
 #include <linux/gfs2_ondisk.h>
 #include <linux/lm_interface.h>
+#include <linux/bio.h>
 
 #include "gfs2.h"
 #include "incore.h"
@@ -172,26 +173,6 @@ static void inode_go_sync(struct gfs2_glock *gl)
 }
 
 /**
- * inode_go_xmote_bh - After promoting/demoting a glock
- * @gl: the glock
- *
- */
-
-static void inode_go_xmote_bh(struct gfs2_glock *gl)
-{
-	struct gfs2_holder *gh = gl->gl_req_gh;
-	struct buffer_head *bh;
-	int error;
-
-	if (gl->gl_state != LM_ST_UNLOCKED &&
-	    (!gh || !(gh->gh_flags & GL_SKIP))) {
-		error = gfs2_meta_read(gl, gl->gl_name.ln_number, 0, &bh);
-		if (!error)
-			brelse(bh);
-	}
-}
-
-/**
  * inode_go_inval - prepare an inode glock to be released
  * @gl: the glock
  * @flags:
@@ -267,6 +248,26 @@ static int inode_go_lock(struct gfs2_holder *gh)
 }
 
 /**
+ * inode_go_dump - print information about an inode
+ * @seq: The iterator
+ * @ip: the inode
+ *
+ * Returns: 0 on success, -ENOBUFS when we run out of space
+ */
+
+static int inode_go_dump(struct seq_file *seq, const struct gfs2_glock *gl)
+{
+	const struct gfs2_inode *ip = gl->gl_object;
+	if (ip == NULL)
+		return 0;
+	gfs2_print_dbg(seq, " I: n:%llu/%llu t:%u f:0x%08lx\n",
+		       (unsigned long long)ip->i_no_formal_ino,
+		       (unsigned long long)ip->i_no_addr,
+		       IF2DT(ip->i_inode.i_mode), ip->i_flags);
+	return 0;
+}
+
+/**
  * rgrp_go_demote_ok - Check to see if it's ok to unlock a RG's glock
  * @gl: the glock
  *
@@ -306,6 +307,22 @@ static void rgrp_go_unlock(struct gfs2_holder *gh)
 }
 
 /**
+ * rgrp_go_dump - print out an rgrp
+ * @seq: The iterator
+ * @gl: The glock in question
+ *
+ */
+
+static int rgrp_go_dump(struct seq_file *seq, const struct gfs2_glock *gl)
+{
+	const struct gfs2_rgrpd *rgd = gl->gl_object;
+	if (rgd == NULL)
+		return 0;
+	gfs2_print_dbg(seq, " R: n:%llu\n", (unsigned long long)rgd->rd_addr);
+	return 0;
+}
+
+/**
  * trans_go_sync - promote/demote the transaction glock
  * @gl: the glock
  * @state: the requested state
@@ -330,7 +347,7 @@ static void trans_go_sync(struct gfs2_glock *gl)
  *
  */
 
-static void trans_go_xmote_bh(struct gfs2_glock *gl)
+static int trans_go_xmote_bh(struct gfs2_glock *gl, struct gfs2_holder *gh)
 {
 	struct gfs2_sbd *sdp = gl->gl_sbd;
 	struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
@@ -338,8 +355,7 @@ static void trans_go_xmote_bh(struct gfs2_glock *gl)
 	struct gfs2_log_header_host head;
 	int error;
 
-	if (gl->gl_state != LM_ST_UNLOCKED &&
-	    test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
+	if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
 		j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);
 
 		error = gfs2_find_jhead(sdp->sd_jdesc, &head);
@@ -354,6 +370,7 @@ static void trans_go_xmote_bh(struct gfs2_glock *gl)
 			gfs2_log_pointers_init(sdp, head.lh_blkno);
 		}
 	}
+	return 0;
 }
 
 /**
@@ -375,12 +392,12 @@ const struct gfs2_glock_operations gfs2_meta_glops = {
 
 const struct gfs2_glock_operations gfs2_inode_glops = {
 	.go_xmote_th = inode_go_sync,
-	.go_xmote_bh = inode_go_xmote_bh,
 	.go_inval = inode_go_inval,
 	.go_demote_ok = inode_go_demote_ok,
 	.go_lock = inode_go_lock,
+	.go_dump = inode_go_dump,
 	.go_type = LM_TYPE_INODE,
-	.go_min_hold_time = HZ / 10,
+	.go_min_hold_time = HZ / 5,
 };
 
 const struct gfs2_glock_operations gfs2_rgrp_glops = {
@@ -389,8 +406,9 @@ const struct gfs2_glock_operations gfs2_rgrp_glops = {
 	.go_demote_ok = rgrp_go_demote_ok,
 	.go_lock = rgrp_go_lock,
 	.go_unlock = rgrp_go_unlock,
+	.go_dump = rgrp_go_dump,
 	.go_type = LM_TYPE_RGRP,
-	.go_min_hold_time = HZ / 10,
+	.go_min_hold_time = HZ / 5,
 };
 
 const struct gfs2_glock_operations gfs2_trans_glops = {
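
[Annotation] inode_go_dump() and rgrp_go_dump() above share one shape for the new ->go_dump() hook (declared in incore.h below): print a single indented line for the object attached to the glock, or nothing when no object is attached, and return 0 or -ENOBUFS. A hedged sketch of what a hook for some other glock type might look like; example_go_dump and struct my_object are hypothetical stand-ins, not part of the patch:

/* Hypothetical go_dump hook following the pattern of inode_go_dump()
 * and rgrp_go_dump() above; "struct my_object" stands in for whatever
 * is attached via gl->gl_object. */
static int example_go_dump(struct seq_file *seq, const struct gfs2_glock *gl)
{
	const struct my_object *obj = gl->gl_object;

	if (obj == NULL)	/* the glock may have no attached object */
		return 0;
	gfs2_print_dbg(seq, " E: n:%llu\n", (unsigned long long)obj->number);
	return 0;
}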
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index eabe5eac41da..4b734c6e34f0 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -128,20 +128,20 @@ struct gfs2_bufdata {
 
 struct gfs2_glock_operations {
 	void (*go_xmote_th) (struct gfs2_glock *gl);
-	void (*go_xmote_bh) (struct gfs2_glock *gl);
+	int (*go_xmote_bh) (struct gfs2_glock *gl, struct gfs2_holder *gh);
 	void (*go_inval) (struct gfs2_glock *gl, int flags);
 	int (*go_demote_ok) (struct gfs2_glock *gl);
 	int (*go_lock) (struct gfs2_holder *gh);
 	void (*go_unlock) (struct gfs2_holder *gh);
+	int (*go_dump)(struct seq_file *seq, const struct gfs2_glock *gl);
 	const int go_type;
 	const unsigned long go_min_hold_time;
 };
 
 enum {
 	/* States */
-	HIF_HOLDER	= 6,
+	HIF_HOLDER	= 6,  /* Set for gh that "holds" the glock */
 	HIF_FIRST	= 7,
-	HIF_ABORTED	= 9,
 	HIF_WAIT	= 10,
 };
 
@@ -154,20 +154,20 @@ struct gfs2_holder {
 	unsigned gh_flags;
 
 	int gh_error;
-	unsigned long gh_iflags;
+	unsigned long gh_iflags; /* HIF_... */
 	unsigned long gh_ip;
 };
 
 enum {
 	GLF_LOCK			= 1,
 	GLF_STICKY			= 2,
 	GLF_DEMOTE			= 3,
 	GLF_PENDING_DEMOTE		= 4,
-	GLF_DIRTY			= 5,
-	GLF_DEMOTE_IN_PROGRESS		= 6,
+	GLF_DEMOTE_IN_PROGRESS		= 5,
+	GLF_DIRTY			= 6,
 	GLF_LFLUSH			= 7,
-	GLF_WAITERS2			= 8,
-	GLF_CONV_DEADLK			= 9,
+	GLF_INVALIDATE_IN_PROGRESS	= 8,
+	GLF_REPLY_PENDING		= 9,
 };
 
 struct gfs2_glock {
@@ -179,19 +179,14 @@ struct gfs2_glock {
 	spinlock_t gl_spin;
 
 	unsigned int gl_state;
+	unsigned int gl_target;
+	unsigned int gl_reply;
 	unsigned int gl_hash;
 	unsigned int gl_demote_state; /* state requested by remote node */
 	unsigned long gl_demote_time; /* time of first demote request */
-	struct pid *gl_owner_pid;
-	unsigned long gl_ip;
 	struct list_head gl_holders;
-	struct list_head gl_waiters1;	/* HIF_MUTEX */
-	struct list_head gl_waiters3;	/* HIF_PROMOTE */
 
 	const struct gfs2_glock_operations *gl_ops;
-
-	struct gfs2_holder *gl_req_gh;
-
 	void *gl_lock;
 	char *gl_lvb;
 	atomic_t gl_lvb_count;
diff --git a/fs/gfs2/locking/dlm/lock.c b/fs/gfs2/locking/dlm/lock.c
index cf7ea8abec87..fed9a67be0f1 100644
--- a/fs/gfs2/locking/dlm/lock.c
+++ b/fs/gfs2/locking/dlm/lock.c
@@ -308,6 +308,9 @@ unsigned int gdlm_lock(void *lock, unsigned int cur_state,
 {
 	struct gdlm_lock *lp = lock;
 
+	if (req_state == LM_ST_UNLOCKED)
+		return gdlm_unlock(lock, cur_state);
+
 	clear_bit(LFL_DLM_CANCEL, &lp->flags);
 	if (flags & LM_FLAG_NOEXP)
 		set_bit(LFL_NOBLOCK, &lp->flags);
diff --git a/fs/gfs2/locking/nolock/main.c b/fs/gfs2/locking/nolock/main.c
index 284a5ece8d94..627bfb79bc8c 100644
--- a/fs/gfs2/locking/nolock/main.c
+++ b/fs/gfs2/locking/nolock/main.c
@@ -107,6 +107,8 @@ static void nolock_put_lock(void *lock)
 static unsigned int nolock_lock(void *lock, unsigned int cur_state,
 				unsigned int req_state, unsigned int flags)
 {
+	if (req_state == LM_ST_UNLOCKED)
+		return 0;
 	return req_state | LM_OUT_CACHEABLE;
 }
 
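
[Annotation] The gdlm_lock() and nolock_lock() hunks above implement the same convention, the one the commit message describes as "unlock is a lock with target mode LM_ST_UNLOCKED": the core now calls ->lm_lock() for unlocks too, so each lock module tests the requested state first. A hedged sketch of that entry-point shape; my_lock and my_unlock are hypothetical module functions, not names from the patch:

/* Sketch of the ->lm_lock() convention both hunks above follow. */
static unsigned int my_unlock(void *lock, unsigned int cur_state);

static unsigned int my_lock(void *lock, unsigned int cur_state,
			    unsigned int req_state, unsigned int flags)
{
	if (req_state == LM_ST_UNLOCKED)	/* unlock folded into the lock path */
		return my_unlock(lock, cur_state);
	/* ...otherwise convert the lock to req_state as before... */
	return req_state | LM_OUT_CACHEABLE;
}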
diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c
index 053e2ebbbd50..bcc668d0fadd 100644
--- a/fs/gfs2/main.c
+++ b/fs/gfs2/main.c
@@ -40,8 +40,6 @@ static void gfs2_init_glock_once(struct kmem_cache *cachep, void *foo)
 	INIT_HLIST_NODE(&gl->gl_list);
 	spin_lock_init(&gl->gl_spin);
 	INIT_LIST_HEAD(&gl->gl_holders);
-	INIT_LIST_HEAD(&gl->gl_waiters1);
-	INIT_LIST_HEAD(&gl->gl_waiters3);
 	gl->gl_lvb = NULL;
 	atomic_set(&gl->gl_lvb_count, 0);
 	INIT_LIST_HEAD(&gl->gl_reclaim);
diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c
index 78d75f892f82..09853620c951 100644
--- a/fs/gfs2/meta_io.c
+++ b/fs/gfs2/meta_io.c
@@ -129,7 +129,7 @@ void gfs2_meta_sync(struct gfs2_glock *gl)
 }
 
 /**
- * getbuf - Get a buffer with a given address space
+ * gfs2_getbuf - Get a buffer with a given address space
  * @gl: the glock
  * @blkno: the block number (filesystem scope)
  * @create: 1 if the buffer should be created
@@ -137,7 +137,7 @@ void gfs2_meta_sync(struct gfs2_glock *gl)
  * Returns: the buffer
  */
 
-static struct buffer_head *getbuf(struct gfs2_glock *gl, u64 blkno, int create)
+struct buffer_head *gfs2_getbuf(struct gfs2_glock *gl, u64 blkno, int create)
 {
 	struct address_space *mapping = gl->gl_aspace->i_mapping;
 	struct gfs2_sbd *sdp = gl->gl_sbd;
@@ -205,7 +205,7 @@ static void meta_prep_new(struct buffer_head *bh)
 struct buffer_head *gfs2_meta_new(struct gfs2_glock *gl, u64 blkno)
 {
 	struct buffer_head *bh;
-	bh = getbuf(gl, blkno, CREATE);
+	bh = gfs2_getbuf(gl, blkno, CREATE);
 	meta_prep_new(bh);
 	return bh;
 }
@@ -223,7 +223,7 @@ struct buffer_head *gfs2_meta_new(struct gfs2_glock *gl, u64 blkno)
 int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags,
 		   struct buffer_head **bhp)
 {
-	*bhp = getbuf(gl, blkno, CREATE);
+	*bhp = gfs2_getbuf(gl, blkno, CREATE);
 	if (!buffer_uptodate(*bhp)) {
 		ll_rw_block(READ_META, 1, bhp);
 		if (flags & DIO_WAIT) {
@@ -346,7 +346,7 @@ void gfs2_meta_wipe(struct gfs2_inode *ip, u64 bstart, u32 blen)
 	struct buffer_head *bh;
 
 	while (blen) {
-		bh = getbuf(ip->i_gl, bstart, NO_CREATE);
+		bh = gfs2_getbuf(ip->i_gl, bstart, NO_CREATE);
 		if (bh) {
 			lock_buffer(bh);
 			gfs2_log_lock(sdp);
@@ -421,7 +421,7 @@ struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen)
 	if (extlen > max_ra)
 		extlen = max_ra;
 
-	first_bh = getbuf(gl, dblock, CREATE);
+	first_bh = gfs2_getbuf(gl, dblock, CREATE);
 
 	if (buffer_uptodate(first_bh))
 		goto out;
@@ -432,7 +432,7 @@ struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen)
 		extlen--;
 
 	while (extlen) {
-		bh = getbuf(gl, dblock, CREATE);
+		bh = gfs2_getbuf(gl, dblock, CREATE);
 
 		if (!buffer_uptodate(bh) && !buffer_locked(bh))
 			ll_rw_block(READA, 1, &bh);
diff --git a/fs/gfs2/meta_io.h b/fs/gfs2/meta_io.h
index 73e3b1c76fe1..b1a5f3674d43 100644
--- a/fs/gfs2/meta_io.h
+++ b/fs/gfs2/meta_io.h
@@ -47,6 +47,7 @@ struct buffer_head *gfs2_meta_new(struct gfs2_glock *gl, u64 blkno);
 int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno,
 		   int flags, struct buffer_head **bhp);
 int gfs2_meta_wait(struct gfs2_sbd *sdp, struct buffer_head *bh);
+struct buffer_head *gfs2_getbuf(struct gfs2_glock *gl, u64 blkno, int create);
 
 void gfs2_attach_bufdata(struct gfs2_glock *gl, struct buffer_head *bh,
 			 int meta);
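
[Annotation] getbuf() is renamed gfs2_getbuf() and exported through meta_io.h, presumably so code outside meta_io.c can look up buffers in a glock's address space. A hedged usage fragment mirroring the gfs2_meta_wipe() hunk above; gl and blkno are placeholders:

/* With create == 0 (NO_CREATE), gfs2_getbuf() may return NULL when the
 * block has no buffer in the glock's address space. */
struct buffer_head *bh = gfs2_getbuf(gl, blkno, 0 /* NO_CREATE */);
if (bh) {
	/* ...examine or invalidate the buffer... */
	brelse(bh);	/* drop the reference gfs2_getbuf() returned */
}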
diff --git a/fs/gfs2/ops_address.c b/fs/gfs2/ops_address.c
index f55394e57cb2..2b556dd034bb 100644
--- a/fs/gfs2/ops_address.c
+++ b/fs/gfs2/ops_address.c
@@ -507,26 +507,23 @@ static int __gfs2_readpage(void *file, struct page *page)
 static int gfs2_readpage(struct file *file, struct page *page)
 {
 	struct gfs2_inode *ip = GFS2_I(page->mapping->host);
-	struct gfs2_holder *gh;
+	struct gfs2_holder gh;
 	int error;
 
-	gh = gfs2_glock_is_locked_by_me(ip->i_gl);
-	if (!gh) {
-		gh = kmalloc(sizeof(struct gfs2_holder), GFP_NOFS);
-		if (!gh)
-			return -ENOBUFS;
-		gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME, gh);
+	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME|LM_FLAG_TRY_1CB, &gh);
+	error = gfs2_glock_nq_atime(&gh);
+	if (unlikely(error)) {
 		unlock_page(page);
-		error = gfs2_glock_nq_atime(gh);
-		if (likely(error != 0))
-			goto out;
-		return AOP_TRUNCATED_PAGE;
+		goto out;
 	}
 	error = __gfs2_readpage(file, page);
-	gfs2_glock_dq(gh);
+	gfs2_glock_dq(&gh);
 out:
-	gfs2_holder_uninit(gh);
-	kfree(gh);
+	gfs2_holder_uninit(&gh);
+	if (error == GLR_TRYFAILED) {
+		yield();
+		return AOP_TRUNCATED_PAGE;
+	}
 	return error;
 }
 
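
[Annotation] The reworked gfs2_readpage() above encodes a lock-ordering rule: the page lock is held on entry, so the glock is only tried, never waited for. LM_FLAG_TRY_1CB makes the enqueue fail fast, and on GLR_TRYFAILED the function yields and returns AOP_TRUNCATED_PAGE so the caller retries with the page lock released. A hedged model of the retry contract as this patch suggests it, not real VFS code; the loop below is a stand-in for the page-cache caller:

/* Model of the caller-side retry, under the assumption that
 * AOP_TRUNCATED_PAGE means "readpage dropped the page lock, try again". */
for (;;) {
	lock_page(page);
	err = mapping->a_ops->readpage(file, page);	/* may unlock the page */
	if (err != AOP_TRUNCATED_PAGE)
		break;		/* success or a real error */
	/* glock was busy: the page lock was released, so just retry */
}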
diff --git a/fs/gfs2/ops_file.c b/fs/gfs2/ops_file.c
index e1b7d525a066..0ff512a11925 100644
--- a/fs/gfs2/ops_file.c
+++ b/fs/gfs2/ops_file.c
@@ -669,8 +669,7 @@ static int do_flock(struct file *file, int cmd, struct file_lock *fl)
 	int error = 0;
 
 	state = (fl->fl_type == F_WRLCK) ? LM_ST_EXCLUSIVE : LM_ST_SHARED;
-	flags = (IS_SETLKW(cmd) ? 0 : LM_FLAG_TRY) | GL_EXACT | GL_NOCACHE
-		| GL_FLOCK;
+	flags = (IS_SETLKW(cmd) ? 0 : LM_FLAG_TRY) | GL_EXACT | GL_NOCACHE;
 
 	mutex_lock(&fp->f_fl_mutex);
 
@@ -683,9 +682,8 @@ static int do_flock(struct file *file, int cmd, struct file_lock *fl)
 		gfs2_glock_dq_wait(fl_gh);
 		gfs2_holder_reinit(state, flags, fl_gh);
 	} else {
-		error = gfs2_glock_get(GFS2_SB(&ip->i_inode),
-				       ip->i_no_addr, &gfs2_flock_glops,
-				       CREATE, &gl);
+		error = gfs2_glock_get(GFS2_SB(&ip->i_inode), ip->i_no_addr,
+				       &gfs2_flock_glops, CREATE, &gl);
 		if (error)
 			goto out;
 		gfs2_holder_init(gl, state, flags, fl_gh);
diff --git a/fs/gfs2/recovery.c b/fs/gfs2/recovery.c
index 2888e4b4b1c5..fdd3f0f16d0d 100644
--- a/fs/gfs2/recovery.c
+++ b/fs/gfs2/recovery.c
@@ -505,7 +505,7 @@ int gfs2_recover_journal(struct gfs2_jdesc *jd)
 
 	error = gfs2_glock_nq_init(sdp->sd_trans_gl, LM_ST_SHARED,
 				   LM_FLAG_NOEXP | LM_FLAG_PRIORITY |
-				   GL_NOCANCEL | GL_NOCACHE, &t_gh);
+				   GL_NOCACHE, &t_gh);
 	if (error)
 		goto fail_gunlock_ji;
 
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index 7aeacbc65f35..12fe38fe498f 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -941,8 +941,7 @@ static int gfs2_lock_fs_check_clean(struct gfs2_sbd *sdp,
 	}
 
 	error = gfs2_glock_nq_init(sdp->sd_trans_gl, LM_ST_DEFERRED,
-				   LM_FLAG_PRIORITY | GL_NOCACHE,
-				   t_gh);
+				   GL_NOCACHE, t_gh);
 
 	list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
 		error = gfs2_jdesc_check(jd);