Diffstat (limited to 'fs/gfs2/glock.c')
-rw-r--r--  fs/gfs2/glock.c | 110
1 file changed, 44 insertions(+), 66 deletions(-)
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 8b674b1f3a55..454d4b4eb36b 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -19,7 +19,6 @@
 #include <linux/list.h>
 #include <linux/wait.h>
 #include <linux/module.h>
-#include <linux/rwsem.h>
 #include <asm/uaccess.h>
 #include <linux/seq_file.h>
 #include <linux/debugfs.h>
@@ -60,7 +59,6 @@ static int __dump_glock(struct seq_file *seq, const struct gfs2_glock *gl);
 #define GLOCK_BUG_ON(gl,x) do { if (unlikely(x)) { __dump_glock(NULL, gl); BUG(); } } while(0)
 static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target);
 
-static DECLARE_RWSEM(gfs2_umount_flush_sem);
 static struct dentry *gfs2_root;
 static struct workqueue_struct *glock_workqueue;
 struct workqueue_struct *gfs2_delete_workqueue;
@@ -154,12 +152,14 @@ static unsigned int gl_hash(const struct gfs2_sbd *sdp,
 static void glock_free(struct gfs2_glock *gl)
 {
 	struct gfs2_sbd *sdp = gl->gl_sbd;
-	struct inode *aspace = gl->gl_aspace;
+	struct address_space *mapping = gfs2_glock2aspace(gl);
+	struct kmem_cache *cachep = gfs2_glock_cachep;
 
-	if (aspace)
-		gfs2_aspace_put(aspace);
+	GLOCK_BUG_ON(gl, mapping && mapping->nrpages);
 	trace_gfs2_glock_put(gl);
-	sdp->sd_lockstruct.ls_ops->lm_put_lock(gfs2_glock_cachep, gl);
+	if (mapping)
+		cachep = gfs2_glock_aspace_cachep;
+	sdp->sd_lockstruct.ls_ops->lm_put_lock(cachep, gl);
 }
 
 /**
@@ -241,15 +241,14 @@ int gfs2_glock_put(struct gfs2_glock *gl)
 	int rv = 0;
 
 	write_lock(gl_lock_addr(gl->gl_hash));
-	if (atomic_dec_and_test(&gl->gl_ref)) {
+	if (atomic_dec_and_lock(&gl->gl_ref, &lru_lock)) {
 		hlist_del(&gl->gl_list);
-		write_unlock(gl_lock_addr(gl->gl_hash));
-		spin_lock(&lru_lock);
 		if (!list_empty(&gl->gl_lru)) {
 			list_del_init(&gl->gl_lru);
 			atomic_dec(&lru_count);
 		}
 		spin_unlock(&lru_lock);
+		write_unlock(gl_lock_addr(gl->gl_hash));
 		GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
 		glock_free(gl);
 		rv = 1;
@@ -513,7 +512,6 @@ retry:
 			GLOCK_BUG_ON(gl, 1);
 		}
 		spin_unlock(&gl->gl_spin);
-		gfs2_glock_put(gl);
 		return;
 	}
 
@@ -524,8 +522,6 @@ retry:
 		if (glops->go_xmote_bh) {
 			spin_unlock(&gl->gl_spin);
 			rv = glops->go_xmote_bh(gl, gh);
-			if (rv == -EAGAIN)
-				return;
 			spin_lock(&gl->gl_spin);
 			if (rv) {
 				do_error(gl, rv);
@@ -540,7 +536,6 @@ out:
 	clear_bit(GLF_LOCK, &gl->gl_flags);
 out_locked:
 	spin_unlock(&gl->gl_spin);
-	gfs2_glock_put(gl);
 }
 
 static unsigned int gfs2_lm_lock(struct gfs2_sbd *sdp, void *lock,
@@ -600,7 +595,6 @@ __acquires(&gl->gl_spin)
 
 	if (!(ret & LM_OUT_ASYNC)) {
 		finish_xmote(gl, ret);
-		gfs2_glock_hold(gl);
 		if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
 			gfs2_glock_put(gl);
 	} else {
@@ -672,12 +666,17 @@ out:
 	return;
 
 out_sched:
+	clear_bit(GLF_LOCK, &gl->gl_flags);
+	smp_mb__after_clear_bit();
 	gfs2_glock_hold(gl);
 	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
 		gfs2_glock_put_nolock(gl);
+	return;
+
 out_unlock:
 	clear_bit(GLF_LOCK, &gl->gl_flags);
-	goto out;
+	smp_mb__after_clear_bit();
+	return;
 }
 
 static void delete_work_func(struct work_struct *work)
@@ -707,10 +706,12 @@ static void glock_work_func(struct work_struct *work)
 {
 	unsigned long delay = 0;
 	struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work);
+	int drop_ref = 0;
 
-	if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags))
+	if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags)) {
 		finish_xmote(gl, gl->gl_reply);
-	down_read(&gfs2_umount_flush_sem);
+		drop_ref = 1;
+	}
 	spin_lock(&gl->gl_spin);
 	if (test_and_clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
 	    gl->gl_state != LM_ST_UNLOCKED &&
@@ -723,10 +724,11 @@ static void glock_work_func(struct work_struct *work)
 	}
 	run_queue(gl, 0);
 	spin_unlock(&gl->gl_spin);
-	up_read(&gfs2_umount_flush_sem);
 	if (!delay ||
 	    queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
 		gfs2_glock_put(gl);
+	if (drop_ref)
+		gfs2_glock_put(gl);
 }
 
 /**
@@ -746,10 +748,11 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
 		   const struct gfs2_glock_operations *glops, int create,
 		   struct gfs2_glock **glp)
 {
+	struct super_block *s = sdp->sd_vfs;
 	struct lm_lockname name = { .ln_number = number, .ln_type = glops->go_type };
 	struct gfs2_glock *gl, *tmp;
 	unsigned int hash = gl_hash(sdp, &name);
-	int error;
+	struct address_space *mapping;
 
 	read_lock(gl_lock_addr(hash));
 	gl = search_bucket(hash, sdp, &name);
@@ -761,10 +764,14 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
 	if (!create)
 		return -ENOENT;
 
-	gl = kmem_cache_alloc(gfs2_glock_cachep, GFP_KERNEL);
+	if (glops->go_flags & GLOF_ASPACE)
+		gl = kmem_cache_alloc(gfs2_glock_aspace_cachep, GFP_KERNEL);
+	else
+		gl = kmem_cache_alloc(gfs2_glock_cachep, GFP_KERNEL);
 	if (!gl)
 		return -ENOMEM;
 
+	atomic_inc(&sdp->sd_glock_disposal);
 	gl->gl_flags = 0;
 	gl->gl_name = name;
 	atomic_set(&gl->gl_ref, 1);
@@ -779,18 +786,18 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
 	gl->gl_tchange = jiffies;
 	gl->gl_object = NULL;
 	gl->gl_sbd = sdp;
-	gl->gl_aspace = NULL;
 	INIT_DELAYED_WORK(&gl->gl_work, glock_work_func);
 	INIT_WORK(&gl->gl_delete, delete_work_func);
 
-	/* If this glock protects actual on-disk data or metadata blocks,
-	   create a VFS inode to manage the pages/buffers holding them. */
-	if (glops == &gfs2_inode_glops || glops == &gfs2_rgrp_glops) {
-		gl->gl_aspace = gfs2_aspace_get(sdp);
-		if (!gl->gl_aspace) {
-			error = -ENOMEM;
-			goto fail;
-		}
+	mapping = gfs2_glock2aspace(gl);
+	if (mapping) {
+		mapping->a_ops = &gfs2_meta_aops;
+		mapping->host = s->s_bdev->bd_inode;
+		mapping->flags = 0;
+		mapping_set_gfp_mask(mapping, GFP_NOFS);
+		mapping->assoc_mapping = NULL;
+		mapping->backing_dev_info = s->s_bdi;
+		mapping->writeback_index = 0;
 	}
 
 	write_lock(gl_lock_addr(hash));
@@ -807,10 +814,6 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
 	*glp = gl;
 
 	return 0;
-
-fail:
-	kmem_cache_free(gfs2_glock_cachep, gl);
-	return error;
 }
 
 /**
@@ -1361,10 +1364,6 @@ static int gfs2_shrink_glock_memory(int nr, gfp_t gfp_mask)
 		list_del_init(&gl->gl_lru);
 		atomic_dec(&lru_count);
 
-		/* Check if glock is about to be freed */
-		if (atomic_read(&gl->gl_ref) == 0)
-			continue;
-
 		/* Test for being demotable */
 		if (!test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
 			gfs2_glock_hold(gl);
@@ -1375,10 +1374,11 @@ static int gfs2_shrink_glock_memory(int nr, gfp_t gfp_mask)
 				handle_callback(gl, LM_ST_UNLOCKED, 0);
 				nr--;
 			}
+			clear_bit(GLF_LOCK, &gl->gl_flags);
+			smp_mb__after_clear_bit();
 			if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
 				gfs2_glock_put_nolock(gl);
 			spin_unlock(&gl->gl_spin);
-			clear_bit(GLF_LOCK, &gl->gl_flags);
 			spin_lock(&lru_lock);
 			continue;
 		}
@@ -1508,35 +1508,13 @@ void gfs2_glock_thaw(struct gfs2_sbd *sdp)
 
 void gfs2_gl_hash_clear(struct gfs2_sbd *sdp)
 {
-	unsigned long t;
 	unsigned int x;
-	int cont;
 
-	t = jiffies;
-
-	for (;;) {
-		cont = 0;
-		for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
-			if (examine_bucket(clear_glock, sdp, x))
-				cont = 1;
-		}
-
-		if (!cont)
-			break;
-
-		if (time_after_eq(jiffies,
-				  t + gfs2_tune_get(sdp, gt_stall_secs) * HZ)) {
-			fs_warn(sdp, "Unmount seems to be stalled. "
-				"Dumping lock state...\n");
-			gfs2_dump_lockstate(sdp);
-			t = jiffies;
-		}
-
-		down_write(&gfs2_umount_flush_sem);
-		invalidate_inodes(sdp->sd_vfs);
-		up_write(&gfs2_umount_flush_sem);
-		msleep(10);
-	}
+	for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
+		examine_bucket(clear_glock, sdp, x);
+	flush_workqueue(glock_workqueue);
+	wait_event(sdp->sd_glock_wait, atomic_read(&sdp->sd_glock_disposal) == 0);
+	gfs2_dump_lockstate(sdp);
 }
 
 void gfs2_glock_finish_truncate(struct gfs2_inode *ip)
@@ -1680,7 +1658,7 @@ static int __dump_glock(struct seq_file *seq, const struct gfs2_glock *gl)
 	dtime *= 1000000/HZ; /* demote time in uSec */
 	if (!test_bit(GLF_DEMOTE, &gl->gl_flags))
 		dtime = 0;
-	gfs2_print_dbg(seq, "G: s:%s n:%u/%llu f:%s t:%s d:%s/%llu a:%d r:%d\n",
+	gfs2_print_dbg(seq, "G: s:%s n:%u/%llx f:%s t:%s d:%s/%llu a:%d r:%d\n",
 		  state2str(gl->gl_state),
 		  gl->gl_name.ln_type,
 		  (unsigned long long)gl->gl_name.ln_number,