diff options
Diffstat (limited to 'fs/gfs2/glock.c')
-rw-r--r-- | fs/gfs2/glock.c | 316 |
1 file changed, 75 insertions, 241 deletions
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c index 438146904b58..6618c1190252 100644 --- a/fs/gfs2/glock.c +++ b/fs/gfs2/glock.c | |||
@@ -19,6 +19,8 @@ | |||
19 | #include <linux/gfs2_ondisk.h> | 19 | #include <linux/gfs2_ondisk.h> |
20 | #include <linux/list.h> | 20 | #include <linux/list.h> |
21 | #include <linux/lm_interface.h> | 21 | #include <linux/lm_interface.h> |
22 | #include <linux/wait.h> | ||
23 | #include <linux/rwsem.h> | ||
22 | #include <asm/uaccess.h> | 24 | #include <asm/uaccess.h> |
23 | 25 | ||
24 | #include "gfs2.h" | 26 | #include "gfs2.h" |
@@ -33,11 +35,6 @@ | |||
33 | #include "super.h" | 35 | #include "super.h" |
34 | #include "util.h" | 36 | #include "util.h" |
35 | 37 | ||
36 | struct greedy { | ||
37 | struct gfs2_holder gr_gh; | ||
38 | struct delayed_work gr_work; | ||
39 | }; | ||
40 | |||
41 | struct gfs2_gl_hash_bucket { | 38 | struct gfs2_gl_hash_bucket { |
42 | struct hlist_head hb_list; | 39 | struct hlist_head hb_list; |
43 | }; | 40 | }; |
@@ -47,6 +44,9 @@ typedef void (*glock_examiner) (struct gfs2_glock * gl); | |||
47 | static int gfs2_dump_lockstate(struct gfs2_sbd *sdp); | 44 | static int gfs2_dump_lockstate(struct gfs2_sbd *sdp); |
48 | static int dump_glock(struct gfs2_glock *gl); | 45 | static int dump_glock(struct gfs2_glock *gl); |
49 | static int dump_inode(struct gfs2_inode *ip); | 46 | static int dump_inode(struct gfs2_inode *ip); |
47 | static void gfs2_glock_xmote_th(struct gfs2_holder *gh); | ||
48 | static void gfs2_glock_drop_th(struct gfs2_glock *gl); | ||
49 | static DECLARE_RWSEM(gfs2_umount_flush_sem); | ||
50 | 50 | ||
51 | #define GFS2_GL_HASH_SHIFT 15 | 51 | #define GFS2_GL_HASH_SHIFT 15 |
52 | #define GFS2_GL_HASH_SIZE (1 << GFS2_GL_HASH_SHIFT) | 52 | #define GFS2_GL_HASH_SIZE (1 << GFS2_GL_HASH_SHIFT) |
@@ -213,30 +213,6 @@ out: | |||
213 | } | 213 | } |
214 | 214 | ||
215 | /** | 215 | /** |
216 | * queue_empty - check to see if a glock's queue is empty | ||
217 | * @gl: the glock | ||
218 | * @head: the head of the queue to check | ||
219 | * | ||
220 | * This function protects the list in the event that a process already | ||
221 | * has a holder on the list and is adding a second holder for itself. | ||
222 | * The glmutex lock is what generally prevents processes from working | ||
223 | * on the same glock at once, but the special case of adding a second | ||
224 | * holder for yourself ("recursive" locking) doesn't involve locking | ||
225 | * glmutex, making the spin lock necessary. | ||
226 | * | ||
227 | * Returns: 1 if the queue is empty | ||
228 | */ | ||
229 | |||
230 | static inline int queue_empty(struct gfs2_glock *gl, struct list_head *head) | ||
231 | { | ||
232 | int empty; | ||
233 | spin_lock(&gl->gl_spin); | ||
234 | empty = list_empty(head); | ||
235 | spin_unlock(&gl->gl_spin); | ||
236 | return empty; | ||
237 | } | ||
238 | |||
239 | /** | ||
240 | * search_bucket() - Find struct gfs2_glock by lock number | 216 | * search_bucket() - Find struct gfs2_glock by lock number |
241 | * @bucket: the bucket to search | 217 | * @bucket: the bucket to search |
242 | * @name: The lock name | 218 | * @name: The lock name |
@@ -395,11 +371,6 @@ void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags, | |||
395 | gh->gh_flags = flags; | 371 | gh->gh_flags = flags; |
396 | gh->gh_error = 0; | 372 | gh->gh_error = 0; |
397 | gh->gh_iflags = 0; | 373 | gh->gh_iflags = 0; |
398 | init_completion(&gh->gh_wait); | ||
399 | |||
400 | if (gh->gh_state == LM_ST_EXCLUSIVE) | ||
401 | gh->gh_flags |= GL_LOCAL_EXCL; | ||
402 | |||
403 | gfs2_glock_hold(gl); | 374 | gfs2_glock_hold(gl); |
404 | } | 375 | } |
405 | 376 | ||
@@ -417,9 +388,6 @@ void gfs2_holder_reinit(unsigned int state, unsigned flags, struct gfs2_holder * | |||
417 | { | 388 | { |
418 | gh->gh_state = state; | 389 | gh->gh_state = state; |
419 | gh->gh_flags = flags; | 390 | gh->gh_flags = flags; |
420 | if (gh->gh_state == LM_ST_EXCLUSIVE) | ||
421 | gh->gh_flags |= GL_LOCAL_EXCL; | ||
422 | |||
423 | gh->gh_iflags &= 1 << HIF_ALLOCED; | 391 | gh->gh_iflags &= 1 << HIF_ALLOCED; |
424 | gh->gh_ip = (unsigned long)__builtin_return_address(0); | 392 | gh->gh_ip = (unsigned long)__builtin_return_address(0); |
425 | } | 393 | } |
@@ -479,6 +447,29 @@ static void gfs2_holder_put(struct gfs2_holder *gh) | |||
479 | kfree(gh); | 447 | kfree(gh); |
480 | } | 448 | } |
481 | 449 | ||
450 | static void gfs2_holder_dispose_or_wake(struct gfs2_holder *gh) | ||
451 | { | ||
452 | if (test_bit(HIF_DEALLOC, &gh->gh_iflags)) { | ||
453 | gfs2_holder_put(gh); | ||
454 | return; | ||
455 | } | ||
456 | clear_bit(HIF_WAIT, &gh->gh_iflags); | ||
457 | smp_mb(); | ||
458 | wake_up_bit(&gh->gh_iflags, HIF_WAIT); | ||
459 | } | ||
460 | |||
461 | static int holder_wait(void *word) | ||
462 | { | ||
463 | schedule(); | ||
464 | return 0; | ||
465 | } | ||
466 | |||
467 | static void wait_on_holder(struct gfs2_holder *gh) | ||
468 | { | ||
469 | might_sleep(); | ||
470 | wait_on_bit(&gh->gh_iflags, HIF_WAIT, holder_wait, TASK_UNINTERRUPTIBLE); | ||
471 | } | ||
472 | |||
482 | /** | 473 | /** |
483 | * rq_mutex - process a mutex request in the queue | 474 | * rq_mutex - process a mutex request in the queue |
484 | * @gh: the glock holder | 475 | * @gh: the glock holder |
@@ -493,7 +484,9 @@ static int rq_mutex(struct gfs2_holder *gh) | |||
493 | list_del_init(&gh->gh_list); | 484 | list_del_init(&gh->gh_list); |
494 | /* gh->gh_error never examined. */ | 485 | /* gh->gh_error never examined. */ |
495 | set_bit(GLF_LOCK, &gl->gl_flags); | 486 | set_bit(GLF_LOCK, &gl->gl_flags); |
496 | complete(&gh->gh_wait); | 487 | clear_bit(HIF_WAIT, &gh->gh_iflags); |
488 | smp_mb(); | ||
489 | wake_up_bit(&gh->gh_iflags, HIF_WAIT); | ||
497 | 490 | ||
498 | return 1; | 491 | return 1; |
499 | } | 492 | } |
@@ -511,7 +504,6 @@ static int rq_promote(struct gfs2_holder *gh) | |||
511 | { | 504 | { |
512 | struct gfs2_glock *gl = gh->gh_gl; | 505 | struct gfs2_glock *gl = gh->gh_gl; |
513 | struct gfs2_sbd *sdp = gl->gl_sbd; | 506 | struct gfs2_sbd *sdp = gl->gl_sbd; |
514 | const struct gfs2_glock_operations *glops = gl->gl_ops; | ||
515 | 507 | ||
516 | if (!relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) { | 508 | if (!relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) { |
517 | if (list_empty(&gl->gl_holders)) { | 509 | if (list_empty(&gl->gl_holders)) { |
@@ -526,7 +518,7 @@ static int rq_promote(struct gfs2_holder *gh) | |||
526 | gfs2_reclaim_glock(sdp); | 518 | gfs2_reclaim_glock(sdp); |
527 | } | 519 | } |
528 | 520 | ||
529 | glops->go_xmote_th(gl, gh->gh_state, gh->gh_flags); | 521 | gfs2_glock_xmote_th(gh); |
530 | spin_lock(&gl->gl_spin); | 522 | spin_lock(&gl->gl_spin); |
531 | } | 523 | } |
532 | return 1; | 524 | return 1; |
@@ -537,11 +529,11 @@ static int rq_promote(struct gfs2_holder *gh) | |||
537 | set_bit(GLF_LOCK, &gl->gl_flags); | 529 | set_bit(GLF_LOCK, &gl->gl_flags); |
538 | } else { | 530 | } else { |
539 | struct gfs2_holder *next_gh; | 531 | struct gfs2_holder *next_gh; |
540 | if (gh->gh_flags & GL_LOCAL_EXCL) | 532 | if (gh->gh_state == LM_ST_EXCLUSIVE) |
541 | return 1; | 533 | return 1; |
542 | next_gh = list_entry(gl->gl_holders.next, struct gfs2_holder, | 534 | next_gh = list_entry(gl->gl_holders.next, struct gfs2_holder, |
543 | gh_list); | 535 | gh_list); |
544 | if (next_gh->gh_flags & GL_LOCAL_EXCL) | 536 | if (next_gh->gh_state == LM_ST_EXCLUSIVE) |
545 | return 1; | 537 | return 1; |
546 | } | 538 | } |
547 | 539 | ||
@@ -549,7 +541,7 @@ static int rq_promote(struct gfs2_holder *gh) | |||
549 | gh->gh_error = 0; | 541 | gh->gh_error = 0; |
550 | set_bit(HIF_HOLDER, &gh->gh_iflags); | 542 | set_bit(HIF_HOLDER, &gh->gh_iflags); |
551 | 543 | ||
552 | complete(&gh->gh_wait); | 544 | gfs2_holder_dispose_or_wake(gh); |
553 | 545 | ||
554 | return 0; | 546 | return 0; |
555 | } | 547 | } |
@@ -564,7 +556,6 @@ static int rq_promote(struct gfs2_holder *gh) | |||
564 | static int rq_demote(struct gfs2_holder *gh) | 556 | static int rq_demote(struct gfs2_holder *gh) |
565 | { | 557 | { |
566 | struct gfs2_glock *gl = gh->gh_gl; | 558 | struct gfs2_glock *gl = gh->gh_gl; |
567 | const struct gfs2_glock_operations *glops = gl->gl_ops; | ||
568 | 559 | ||
569 | if (!list_empty(&gl->gl_holders)) | 560 | if (!list_empty(&gl->gl_holders)) |
570 | return 1; | 561 | return 1; |
@@ -573,10 +564,7 @@ static int rq_demote(struct gfs2_holder *gh) | |||
573 | list_del_init(&gh->gh_list); | 564 | list_del_init(&gh->gh_list); |
574 | gh->gh_error = 0; | 565 | gh->gh_error = 0; |
575 | spin_unlock(&gl->gl_spin); | 566 | spin_unlock(&gl->gl_spin); |
576 | if (test_bit(HIF_DEALLOC, &gh->gh_iflags)) | 567 | gfs2_holder_dispose_or_wake(gh); |
577 | gfs2_holder_put(gh); | ||
578 | else | ||
579 | complete(&gh->gh_wait); | ||
580 | spin_lock(&gl->gl_spin); | 568 | spin_lock(&gl->gl_spin); |
581 | } else { | 569 | } else { |
582 | gl->gl_req_gh = gh; | 570 | gl->gl_req_gh = gh; |
@@ -585,9 +573,9 @@ static int rq_demote(struct gfs2_holder *gh) | |||
585 | 573 | ||
586 | if (gh->gh_state == LM_ST_UNLOCKED || | 574 | if (gh->gh_state == LM_ST_UNLOCKED || |
587 | gl->gl_state != LM_ST_EXCLUSIVE) | 575 | gl->gl_state != LM_ST_EXCLUSIVE) |
588 | glops->go_drop_th(gl); | 576 | gfs2_glock_drop_th(gl); |
589 | else | 577 | else |
590 | glops->go_xmote_th(gl, gh->gh_state, gh->gh_flags); | 578 | gfs2_glock_xmote_th(gh); |
591 | 579 | ||
592 | spin_lock(&gl->gl_spin); | 580 | spin_lock(&gl->gl_spin); |
593 | } | 581 | } |
@@ -596,30 +584,6 @@ static int rq_demote(struct gfs2_holder *gh) | |||
596 | } | 584 | } |
597 | 585 | ||
598 | /** | 586 | /** |
599 | * rq_greedy - process a queued request to drop greedy status | ||
600 | * @gh: the glock holder | ||
601 | * | ||
602 | * Returns: 1 if the queue is blocked | ||
603 | */ | ||
604 | |||
605 | static int rq_greedy(struct gfs2_holder *gh) | ||
606 | { | ||
607 | struct gfs2_glock *gl = gh->gh_gl; | ||
608 | |||
609 | list_del_init(&gh->gh_list); | ||
610 | /* gh->gh_error never examined. */ | ||
611 | clear_bit(GLF_GREEDY, &gl->gl_flags); | ||
612 | spin_unlock(&gl->gl_spin); | ||
613 | |||
614 | gfs2_holder_uninit(gh); | ||
615 | kfree(container_of(gh, struct greedy, gr_gh)); | ||
616 | |||
617 | spin_lock(&gl->gl_spin); | ||
618 | |||
619 | return 0; | ||
620 | } | ||
621 | |||
622 | /** | ||
623 | * run_queue - process holder structures on a glock | 587 | * run_queue - process holder structures on a glock |
624 | * @gl: the glock | 588 | * @gl: the glock |
625 | * | 589 | * |
@@ -649,8 +613,6 @@ static void run_queue(struct gfs2_glock *gl) | |||
649 | 613 | ||
650 | if (test_bit(HIF_DEMOTE, &gh->gh_iflags)) | 614 | if (test_bit(HIF_DEMOTE, &gh->gh_iflags)) |
651 | blocked = rq_demote(gh); | 615 | blocked = rq_demote(gh); |
652 | else if (test_bit(HIF_GREEDY, &gh->gh_iflags)) | ||
653 | blocked = rq_greedy(gh); | ||
654 | else | 616 | else |
655 | gfs2_assert_warn(gl->gl_sbd, 0); | 617 | gfs2_assert_warn(gl->gl_sbd, 0); |
656 | 618 | ||
@@ -684,6 +646,8 @@ static void gfs2_glmutex_lock(struct gfs2_glock *gl) | |||
684 | 646 | ||
685 | gfs2_holder_init(gl, 0, 0, &gh); | 647 | gfs2_holder_init(gl, 0, 0, &gh); |
686 | set_bit(HIF_MUTEX, &gh.gh_iflags); | 648 | set_bit(HIF_MUTEX, &gh.gh_iflags); |
649 | if (test_and_set_bit(HIF_WAIT, &gh.gh_iflags)) | ||
650 | BUG(); | ||
687 | 651 | ||
688 | spin_lock(&gl->gl_spin); | 652 | spin_lock(&gl->gl_spin); |
689 | if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) { | 653 | if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) { |
@@ -691,11 +655,13 @@ static void gfs2_glmutex_lock(struct gfs2_glock *gl) | |||
691 | } else { | 655 | } else { |
692 | gl->gl_owner = current; | 656 | gl->gl_owner = current; |
693 | gl->gl_ip = (unsigned long)__builtin_return_address(0); | 657 | gl->gl_ip = (unsigned long)__builtin_return_address(0); |
694 | complete(&gh.gh_wait); | 658 | clear_bit(HIF_WAIT, &gh.gh_iflags); |
659 | smp_mb(); | ||
660 | wake_up_bit(&gh.gh_iflags, HIF_WAIT); | ||
695 | } | 661 | } |
696 | spin_unlock(&gl->gl_spin); | 662 | spin_unlock(&gl->gl_spin); |
697 | 663 | ||
698 | wait_for_completion(&gh.gh_wait); | 664 | wait_on_holder(&gh); |
699 | gfs2_holder_uninit(&gh); | 665 | gfs2_holder_uninit(&gh); |
700 | } | 666 | } |
701 | 667 | ||
@@ -774,6 +740,7 @@ restart: | |||
774 | return; | 740 | return; |
775 | set_bit(HIF_DEMOTE, &new_gh->gh_iflags); | 741 | set_bit(HIF_DEMOTE, &new_gh->gh_iflags); |
776 | set_bit(HIF_DEALLOC, &new_gh->gh_iflags); | 742 | set_bit(HIF_DEALLOC, &new_gh->gh_iflags); |
743 | set_bit(HIF_WAIT, &new_gh->gh_iflags); | ||
777 | 744 | ||
778 | goto restart; | 745 | goto restart; |
779 | } | 746 | } |
@@ -825,7 +792,7 @@ static void xmote_bh(struct gfs2_glock *gl, unsigned int ret) | |||
825 | int op_done = 1; | 792 | int op_done = 1; |
826 | 793 | ||
827 | gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags)); | 794 | gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags)); |
828 | gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders)); | 795 | gfs2_assert_warn(sdp, list_empty(&gl->gl_holders)); |
829 | gfs2_assert_warn(sdp, !(ret & LM_OUT_ASYNC)); | 796 | gfs2_assert_warn(sdp, !(ret & LM_OUT_ASYNC)); |
830 | 797 | ||
831 | state_change(gl, ret & LM_OUT_ST_MASK); | 798 | state_change(gl, ret & LM_OUT_ST_MASK); |
@@ -908,12 +875,8 @@ static void xmote_bh(struct gfs2_glock *gl, unsigned int ret) | |||
908 | 875 | ||
909 | gfs2_glock_put(gl); | 876 | gfs2_glock_put(gl); |
910 | 877 | ||
911 | if (gh) { | 878 | if (gh) |
912 | if (test_bit(HIF_DEALLOC, &gh->gh_iflags)) | 879 | gfs2_holder_dispose_or_wake(gh); |
913 | gfs2_holder_put(gh); | ||
914 | else | ||
915 | complete(&gh->gh_wait); | ||
916 | } | ||
917 | } | 880 | } |
918 | 881 | ||
919 | /** | 882 | /** |
@@ -924,23 +887,26 @@ static void xmote_bh(struct gfs2_glock *gl, unsigned int ret) | |||
924 | * | 887 | * |
925 | */ | 888 | */ |
926 | 889 | ||
927 | void gfs2_glock_xmote_th(struct gfs2_glock *gl, unsigned int state, int flags) | 890 | void gfs2_glock_xmote_th(struct gfs2_holder *gh) |
928 | { | 891 | { |
892 | struct gfs2_glock *gl = gh->gh_gl; | ||
929 | struct gfs2_sbd *sdp = gl->gl_sbd; | 893 | struct gfs2_sbd *sdp = gl->gl_sbd; |
894 | int flags = gh->gh_flags; | ||
895 | unsigned state = gh->gh_state; | ||
930 | const struct gfs2_glock_operations *glops = gl->gl_ops; | 896 | const struct gfs2_glock_operations *glops = gl->gl_ops; |
931 | int lck_flags = flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB | | 897 | int lck_flags = flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB | |
932 | LM_FLAG_NOEXP | LM_FLAG_ANY | | 898 | LM_FLAG_NOEXP | LM_FLAG_ANY | |
933 | LM_FLAG_PRIORITY); | 899 | LM_FLAG_PRIORITY); |
934 | unsigned int lck_ret; | 900 | unsigned int lck_ret; |
935 | 901 | ||
902 | if (glops->go_xmote_th) | ||
903 | glops->go_xmote_th(gl); | ||
904 | |||
936 | gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags)); | 905 | gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags)); |
937 | gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders)); | 906 | gfs2_assert_warn(sdp, list_empty(&gl->gl_holders)); |
938 | gfs2_assert_warn(sdp, state != LM_ST_UNLOCKED); | 907 | gfs2_assert_warn(sdp, state != LM_ST_UNLOCKED); |
939 | gfs2_assert_warn(sdp, state != gl->gl_state); | 908 | gfs2_assert_warn(sdp, state != gl->gl_state); |
940 | 909 | ||
941 | if (gl->gl_state == LM_ST_EXCLUSIVE && glops->go_sync) | ||
942 | glops->go_sync(gl); | ||
943 | |||
944 | gfs2_glock_hold(gl); | 910 | gfs2_glock_hold(gl); |
945 | gl->gl_req_bh = xmote_bh; | 911 | gl->gl_req_bh = xmote_bh; |
946 | 912 | ||
@@ -971,10 +937,8 @@ static void drop_bh(struct gfs2_glock *gl, unsigned int ret) | |||
971 | const struct gfs2_glock_operations *glops = gl->gl_ops; | 937 | const struct gfs2_glock_operations *glops = gl->gl_ops; |
972 | struct gfs2_holder *gh = gl->gl_req_gh; | 938 | struct gfs2_holder *gh = gl->gl_req_gh; |
973 | 939 | ||
974 | clear_bit(GLF_PREFETCH, &gl->gl_flags); | ||
975 | |||
976 | gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags)); | 940 | gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags)); |
977 | gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders)); | 941 | gfs2_assert_warn(sdp, list_empty(&gl->gl_holders)); |
978 | gfs2_assert_warn(sdp, !ret); | 942 | gfs2_assert_warn(sdp, !ret); |
979 | 943 | ||
980 | state_change(gl, LM_ST_UNLOCKED); | 944 | state_change(gl, LM_ST_UNLOCKED); |
@@ -1001,12 +965,8 @@ static void drop_bh(struct gfs2_glock *gl, unsigned int ret) | |||
1001 | 965 | ||
1002 | gfs2_glock_put(gl); | 966 | gfs2_glock_put(gl); |
1003 | 967 | ||
1004 | if (gh) { | 968 | if (gh) |
1005 | if (test_bit(HIF_DEALLOC, &gh->gh_iflags)) | 969 | gfs2_holder_dispose_or_wake(gh); |
1006 | gfs2_holder_put(gh); | ||
1007 | else | ||
1008 | complete(&gh->gh_wait); | ||
1009 | } | ||
1010 | } | 970 | } |
1011 | 971 | ||
1012 | /** | 972 | /** |
@@ -1015,19 +975,19 @@ static void drop_bh(struct gfs2_glock *gl, unsigned int ret) | |||
1015 | * | 975 | * |
1016 | */ | 976 | */ |
1017 | 977 | ||
1018 | void gfs2_glock_drop_th(struct gfs2_glock *gl) | 978 | static void gfs2_glock_drop_th(struct gfs2_glock *gl) |
1019 | { | 979 | { |
1020 | struct gfs2_sbd *sdp = gl->gl_sbd; | 980 | struct gfs2_sbd *sdp = gl->gl_sbd; |
1021 | const struct gfs2_glock_operations *glops = gl->gl_ops; | 981 | const struct gfs2_glock_operations *glops = gl->gl_ops; |
1022 | unsigned int ret; | 982 | unsigned int ret; |
1023 | 983 | ||
984 | if (glops->go_drop_th) | ||
985 | glops->go_drop_th(gl); | ||
986 | |||
1024 | gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags)); | 987 | gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags)); |
1025 | gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders)); | 988 | gfs2_assert_warn(sdp, list_empty(&gl->gl_holders)); |
1026 | gfs2_assert_warn(sdp, gl->gl_state != LM_ST_UNLOCKED); | 989 | gfs2_assert_warn(sdp, gl->gl_state != LM_ST_UNLOCKED); |
1027 | 990 | ||
1028 | if (gl->gl_state == LM_ST_EXCLUSIVE && glops->go_sync) | ||
1029 | glops->go_sync(gl); | ||
1030 | |||
1031 | gfs2_glock_hold(gl); | 991 | gfs2_glock_hold(gl); |
1032 | gl->gl_req_bh = drop_bh; | 992 | gl->gl_req_bh = drop_bh; |
1033 | 993 | ||
@@ -1107,8 +1067,7 @@ static int glock_wait_internal(struct gfs2_holder *gh) | |||
1107 | if (gh->gh_flags & LM_FLAG_PRIORITY) | 1067 | if (gh->gh_flags & LM_FLAG_PRIORITY) |
1108 | do_cancels(gh); | 1068 | do_cancels(gh); |
1109 | 1069 | ||
1110 | wait_for_completion(&gh->gh_wait); | 1070 | wait_on_holder(gh); |
1111 | |||
1112 | if (gh->gh_error) | 1071 | if (gh->gh_error) |
1113 | return gh->gh_error; | 1072 | return gh->gh_error; |
1114 | 1073 | ||
@@ -1164,6 +1123,8 @@ static void add_to_queue(struct gfs2_holder *gh) | |||
1164 | struct gfs2_holder *existing; | 1123 | struct gfs2_holder *existing; |
1165 | 1124 | ||
1166 | BUG_ON(!gh->gh_owner); | 1125 | BUG_ON(!gh->gh_owner); |
1126 | if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags)) | ||
1127 | BUG(); | ||
1167 | 1128 | ||
1168 | existing = find_holder_by_owner(&gl->gl_holders, gh->gh_owner); | 1129 | existing = find_holder_by_owner(&gl->gl_holders, gh->gh_owner); |
1169 | if (existing) { | 1130 | if (existing) { |
@@ -1227,8 +1188,6 @@ restart: | |||
1227 | } | 1188 | } |
1228 | } | 1189 | } |
1229 | 1190 | ||
1230 | clear_bit(GLF_PREFETCH, &gl->gl_flags); | ||
1231 | |||
1232 | return error; | 1191 | return error; |
1233 | } | 1192 | } |
1234 | 1193 | ||
@@ -1321,98 +1280,6 @@ void gfs2_glock_dq(struct gfs2_holder *gh) | |||
1321 | } | 1280 | } |
1322 | 1281 | ||
1323 | /** | 1282 | /** |
1324 | * gfs2_glock_prefetch - Try to prefetch a glock | ||
1325 | * @gl: the glock | ||
1326 | * @state: the state to prefetch in | ||
1327 | * @flags: flags passed to go_xmote_th() | ||
1328 | * | ||
1329 | */ | ||
1330 | |||
1331 | static void gfs2_glock_prefetch(struct gfs2_glock *gl, unsigned int state, | ||
1332 | int flags) | ||
1333 | { | ||
1334 | const struct gfs2_glock_operations *glops = gl->gl_ops; | ||
1335 | |||
1336 | spin_lock(&gl->gl_spin); | ||
1337 | |||
1338 | if (test_bit(GLF_LOCK, &gl->gl_flags) || !list_empty(&gl->gl_holders) || | ||
1339 | !list_empty(&gl->gl_waiters1) || !list_empty(&gl->gl_waiters2) || | ||
1340 | !list_empty(&gl->gl_waiters3) || | ||
1341 | relaxed_state_ok(gl->gl_state, state, flags)) { | ||
1342 | spin_unlock(&gl->gl_spin); | ||
1343 | return; | ||
1344 | } | ||
1345 | |||
1346 | set_bit(GLF_PREFETCH, &gl->gl_flags); | ||
1347 | set_bit(GLF_LOCK, &gl->gl_flags); | ||
1348 | spin_unlock(&gl->gl_spin); | ||
1349 | |||
1350 | glops->go_xmote_th(gl, state, flags); | ||
1351 | } | ||
1352 | |||
1353 | static void greedy_work(struct work_struct *work) | ||
1354 | { | ||
1355 | struct greedy *gr = container_of(work, struct greedy, gr_work.work); | ||
1356 | struct gfs2_holder *gh = &gr->gr_gh; | ||
1357 | struct gfs2_glock *gl = gh->gh_gl; | ||
1358 | const struct gfs2_glock_operations *glops = gl->gl_ops; | ||
1359 | |||
1360 | clear_bit(GLF_SKIP_WAITERS2, &gl->gl_flags); | ||
1361 | |||
1362 | if (glops->go_greedy) | ||
1363 | glops->go_greedy(gl); | ||
1364 | |||
1365 | spin_lock(&gl->gl_spin); | ||
1366 | |||
1367 | if (list_empty(&gl->gl_waiters2)) { | ||
1368 | clear_bit(GLF_GREEDY, &gl->gl_flags); | ||
1369 | spin_unlock(&gl->gl_spin); | ||
1370 | gfs2_holder_uninit(gh); | ||
1371 | kfree(gr); | ||
1372 | } else { | ||
1373 | gfs2_glock_hold(gl); | ||
1374 | list_add_tail(&gh->gh_list, &gl->gl_waiters2); | ||
1375 | run_queue(gl); | ||
1376 | spin_unlock(&gl->gl_spin); | ||
1377 | gfs2_glock_put(gl); | ||
1378 | } | ||
1379 | } | ||
1380 | |||
1381 | /** | ||
1382 | * gfs2_glock_be_greedy - | ||
1383 | * @gl: | ||
1384 | * @time: | ||
1385 | * | ||
1386 | * Returns: 0 if go_greedy will be called, 1 otherwise | ||
1387 | */ | ||
1388 | |||
1389 | int gfs2_glock_be_greedy(struct gfs2_glock *gl, unsigned int time) | ||
1390 | { | ||
1391 | struct greedy *gr; | ||
1392 | struct gfs2_holder *gh; | ||
1393 | |||
1394 | if (!time || gl->gl_sbd->sd_args.ar_localcaching || | ||
1395 | test_and_set_bit(GLF_GREEDY, &gl->gl_flags)) | ||
1396 | return 1; | ||
1397 | |||
1398 | gr = kmalloc(sizeof(struct greedy), GFP_KERNEL); | ||
1399 | if (!gr) { | ||
1400 | clear_bit(GLF_GREEDY, &gl->gl_flags); | ||
1401 | return 1; | ||
1402 | } | ||
1403 | gh = &gr->gr_gh; | ||
1404 | |||
1405 | gfs2_holder_init(gl, 0, 0, gh); | ||
1406 | set_bit(HIF_GREEDY, &gh->gh_iflags); | ||
1407 | INIT_DELAYED_WORK(&gr->gr_work, greedy_work); | ||
1408 | |||
1409 | set_bit(GLF_SKIP_WAITERS2, &gl->gl_flags); | ||
1410 | schedule_delayed_work(&gr->gr_work, time); | ||
1411 | |||
1412 | return 0; | ||
1413 | } | ||
1414 | |||
1415 | /** | ||
1416 | * gfs2_glock_dq_uninit - dequeue a holder from a glock and initialize it | 1283 | * gfs2_glock_dq_uninit - dequeue a holder from a glock and initialize it |
1417 | * @gh: the holder structure | 1284 | * @gh: the holder structure |
1418 | * | 1285 | * |
@@ -1470,10 +1337,7 @@ static int glock_compare(const void *arg_a, const void *arg_b) | |||
1470 | return 1; | 1337 | return 1; |
1471 | if (a->ln_number < b->ln_number) | 1338 | if (a->ln_number < b->ln_number) |
1472 | return -1; | 1339 | return -1; |
1473 | if (gh_a->gh_state == LM_ST_SHARED && gh_b->gh_state == LM_ST_EXCLUSIVE) | 1340 | BUG_ON(gh_a->gh_gl->gl_ops->go_type == gh_b->gh_gl->gl_ops->go_type); |
1474 | return 1; | ||
1475 | if (!(gh_a->gh_flags & GL_LOCAL_EXCL) && (gh_b->gh_flags & GL_LOCAL_EXCL)) | ||
1476 | return 1; | ||
1477 | return 0; | 1341 | return 0; |
1478 | } | 1342 | } |
1479 | 1343 | ||
@@ -1618,34 +1482,6 @@ void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs) | |||
1618 | } | 1482 | } |
1619 | 1483 | ||
1620 | /** | 1484 | /** |
1621 | * gfs2_glock_prefetch_num - prefetch a glock based on lock number | ||
1622 | * @sdp: the filesystem | ||
1623 | * @number: the lock number | ||
1624 | * @glops: the glock operations for the type of glock | ||
1625 | * @state: the state to acquire the glock in | ||
1626 | * @flags: modifier flags for the aquisition | ||
1627 | * | ||
1628 | * Returns: errno | ||
1629 | */ | ||
1630 | |||
1631 | void gfs2_glock_prefetch_num(struct gfs2_sbd *sdp, u64 number, | ||
1632 | const struct gfs2_glock_operations *glops, | ||
1633 | unsigned int state, int flags) | ||
1634 | { | ||
1635 | struct gfs2_glock *gl; | ||
1636 | int error; | ||
1637 | |||
1638 | if (atomic_read(&sdp->sd_reclaim_count) < | ||
1639 | gfs2_tune_get(sdp, gt_reclaim_limit)) { | ||
1640 | error = gfs2_glock_get(sdp, number, glops, CREATE, &gl); | ||
1641 | if (!error) { | ||
1642 | gfs2_glock_prefetch(gl, state, flags); | ||
1643 | gfs2_glock_put(gl); | ||
1644 | } | ||
1645 | } | ||
1646 | } | ||
1647 | |||
1648 | /** | ||
1649 | * gfs2_lvb_hold - attach a LVB from a glock | 1485 | * gfs2_lvb_hold - attach a LVB from a glock |
1650 | * @gl: The glock in question | 1486 | * @gl: The glock in question |
1651 | * | 1487 | * |
@@ -1703,8 +1539,6 @@ static void blocking_cb(struct gfs2_sbd *sdp, struct lm_lockname *name, | |||
1703 | if (!gl) | 1539 | if (!gl) |
1704 | return; | 1540 | return; |
1705 | 1541 | ||
1706 | if (gl->gl_ops->go_callback) | ||
1707 | gl->gl_ops->go_callback(gl, state); | ||
1708 | handle_callback(gl, state); | 1542 | handle_callback(gl, state); |
1709 | 1543 | ||
1710 | spin_lock(&gl->gl_spin); | 1544 | spin_lock(&gl->gl_spin); |
@@ -1746,12 +1580,14 @@ void gfs2_glock_cb(void *cb_data, unsigned int type, void *data) | |||
1746 | struct lm_async_cb *async = data; | 1580 | struct lm_async_cb *async = data; |
1747 | struct gfs2_glock *gl; | 1581 | struct gfs2_glock *gl; |
1748 | 1582 | ||
1583 | down_read(&gfs2_umount_flush_sem); | ||
1749 | gl = gfs2_glock_find(sdp, &async->lc_name); | 1584 | gl = gfs2_glock_find(sdp, &async->lc_name); |
1750 | if (gfs2_assert_warn(sdp, gl)) | 1585 | if (gfs2_assert_warn(sdp, gl)) |
1751 | return; | 1586 | return; |
1752 | if (!gfs2_assert_warn(sdp, gl->gl_req_bh)) | 1587 | if (!gfs2_assert_warn(sdp, gl->gl_req_bh)) |
1753 | gl->gl_req_bh(gl, async->lc_ret); | 1588 | gl->gl_req_bh(gl, async->lc_ret); |
1754 | gfs2_glock_put(gl); | 1589 | gfs2_glock_put(gl); |
1590 | up_read(&gfs2_umount_flush_sem); | ||
1755 | return; | 1591 | return; |
1756 | } | 1592 | } |
1757 | 1593 | ||
@@ -1781,15 +1617,11 @@ void gfs2_glock_cb(void *cb_data, unsigned int type, void *data) | |||
1781 | 1617 | ||
1782 | static int demote_ok(struct gfs2_glock *gl) | 1618 | static int demote_ok(struct gfs2_glock *gl) |
1783 | { | 1619 | { |
1784 | struct gfs2_sbd *sdp = gl->gl_sbd; | ||
1785 | const struct gfs2_glock_operations *glops = gl->gl_ops; | 1620 | const struct gfs2_glock_operations *glops = gl->gl_ops; |
1786 | int demote = 1; | 1621 | int demote = 1; |
1787 | 1622 | ||
1788 | if (test_bit(GLF_STICKY, &gl->gl_flags)) | 1623 | if (test_bit(GLF_STICKY, &gl->gl_flags)) |
1789 | demote = 0; | 1624 | demote = 0; |
1790 | else if (test_bit(GLF_PREFETCH, &gl->gl_flags)) | ||
1791 | demote = time_after_eq(jiffies, gl->gl_stamp + | ||
1792 | gfs2_tune_get(sdp, gt_prefetch_secs) * HZ); | ||
1793 | else if (glops->go_demote_ok) | 1625 | else if (glops->go_demote_ok) |
1794 | demote = glops->go_demote_ok(gl); | 1626 | demote = glops->go_demote_ok(gl); |
1795 | 1627 | ||
@@ -1845,7 +1677,7 @@ void gfs2_reclaim_glock(struct gfs2_sbd *sdp) | |||
1845 | atomic_inc(&sdp->sd_reclaimed); | 1677 | atomic_inc(&sdp->sd_reclaimed); |
1846 | 1678 | ||
1847 | if (gfs2_glmutex_trylock(gl)) { | 1679 | if (gfs2_glmutex_trylock(gl)) { |
1848 | if (queue_empty(gl, &gl->gl_holders) && | 1680 | if (list_empty(&gl->gl_holders) && |
1849 | gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl)) | 1681 | gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl)) |
1850 | handle_callback(gl, LM_ST_UNLOCKED); | 1682 | handle_callback(gl, LM_ST_UNLOCKED); |
1851 | gfs2_glmutex_unlock(gl); | 1683 | gfs2_glmutex_unlock(gl); |
@@ -1909,7 +1741,7 @@ static void scan_glock(struct gfs2_glock *gl) | |||
1909 | return; | 1741 | return; |
1910 | 1742 | ||
1911 | if (gfs2_glmutex_trylock(gl)) { | 1743 | if (gfs2_glmutex_trylock(gl)) { |
1912 | if (queue_empty(gl, &gl->gl_holders) && | 1744 | if (list_empty(&gl->gl_holders) && |
1913 | gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl)) | 1745 | gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl)) |
1914 | goto out_schedule; | 1746 | goto out_schedule; |
1915 | gfs2_glmutex_unlock(gl); | 1747 | gfs2_glmutex_unlock(gl); |
@@ -1958,7 +1790,7 @@ static void clear_glock(struct gfs2_glock *gl) | |||
1958 | } | 1790 | } |
1959 | 1791 | ||
1960 | if (gfs2_glmutex_trylock(gl)) { | 1792 | if (gfs2_glmutex_trylock(gl)) { |
1961 | if (queue_empty(gl, &gl->gl_holders) && | 1793 | if (list_empty(&gl->gl_holders) && |
1962 | gl->gl_state != LM_ST_UNLOCKED) | 1794 | gl->gl_state != LM_ST_UNLOCKED) |
1963 | handle_callback(gl, LM_ST_UNLOCKED); | 1795 | handle_callback(gl, LM_ST_UNLOCKED); |
1964 | gfs2_glmutex_unlock(gl); | 1796 | gfs2_glmutex_unlock(gl); |
@@ -2000,7 +1832,9 @@ void gfs2_gl_hash_clear(struct gfs2_sbd *sdp, int wait) | |||
2000 | t = jiffies; | 1832 | t = jiffies; |
2001 | } | 1833 | } |
2002 | 1834 | ||
1835 | down_write(&gfs2_umount_flush_sem); | ||
2003 | invalidate_inodes(sdp->sd_vfs); | 1836 | invalidate_inodes(sdp->sd_vfs); |
1837 | up_write(&gfs2_umount_flush_sem); | ||
2004 | msleep(10); | 1838 | msleep(10); |
2005 | } | 1839 | } |
2006 | } | 1840 | } |