about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorAndreas Gruenbacher <agruenba@redhat.com>2017-08-01 12:18:26 -0400
committerBob Peterson <rpeterso@redhat.com>2017-08-10 11:39:31 -0400
commit0515480ad424f2d6853ffe448f444ba3c756c057 (patch)
treef7a9d9d952c4cafc36e41f517086f0599b564d31
parent61b91cfdc6c0c49a8cc8258cbee846551029d694 (diff)
gfs2: gfs2_glock_get: Wait on freeing glocks
Keep glocks in their hash table until they are freed instead of removing them when their last reference is dropped. This allows gfs2_glock_get to wait for any previous instances of a glock to go away before creating a new glock. Special thanks to Andy Price for finding and fixing a problem which also required us to delete the rcu_read_unlock from the error case in function gfs2_glock_get. Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com> Signed-off-by: Bob Peterson <rpeterso@redhat.com>
-rw-r--r-- fs/gfs2/glock.c | 126
1 file changed, 104 insertions, 22 deletions
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 1029340fc8ba..11d48b964047 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -15,6 +15,7 @@
15#include <linux/buffer_head.h> 15#include <linux/buffer_head.h>
16#include <linux/delay.h> 16#include <linux/delay.h>
17#include <linux/sort.h> 17#include <linux/sort.h>
18#include <linux/hash.h>
18#include <linux/jhash.h> 19#include <linux/jhash.h>
19#include <linux/kallsyms.h> 20#include <linux/kallsyms.h>
20#include <linux/gfs2_ondisk.h> 21#include <linux/gfs2_ondisk.h>
@@ -80,6 +81,66 @@ static struct rhashtable_params ht_parms = {
80 81
81static struct rhashtable gl_hash_table; 82static struct rhashtable gl_hash_table;
82 83
84#define GLOCK_WAIT_TABLE_BITS 12
85#define GLOCK_WAIT_TABLE_SIZE (1 << GLOCK_WAIT_TABLE_BITS)
86static wait_queue_head_t glock_wait_table[GLOCK_WAIT_TABLE_SIZE] __cacheline_aligned;
87
88struct wait_glock_queue {
89 struct lm_lockname *name;
90 wait_queue_entry_t wait;
91};
92
93static int glock_wake_function(wait_queue_entry_t *wait, unsigned int mode,
94 int sync, void *key)
95{
96 struct wait_glock_queue *wait_glock =
97 container_of(wait, struct wait_glock_queue, wait);
98 struct lm_lockname *wait_name = wait_glock->name;
99 struct lm_lockname *wake_name = key;
100
101 if (wake_name->ln_sbd != wait_name->ln_sbd ||
102 wake_name->ln_number != wait_name->ln_number ||
103 wake_name->ln_type != wait_name->ln_type)
104 return 0;
105 return autoremove_wake_function(wait, mode, sync, key);
106}
107
108static wait_queue_head_t *glock_waitqueue(struct lm_lockname *name)
109{
110 u32 hash = jhash2((u32 *)name, sizeof(*name) / 4, 0);
111
112 return glock_wait_table + hash_32(hash, GLOCK_WAIT_TABLE_BITS);
113}
114
115static void prepare_to_wait_on_glock(wait_queue_head_t **wq,
116 struct wait_glock_queue *wait,
117 struct lm_lockname *name)
118{
119 wait->name = name;
120 init_wait(&wait->wait);
121 wait->wait.func = glock_wake_function;
122 *wq = glock_waitqueue(name);
123 prepare_to_wait(*wq, &wait->wait, TASK_UNINTERRUPTIBLE);
124}
125
126static void finish_wait_on_glock(wait_queue_head_t *wq,
127 struct wait_glock_queue *wait)
128{
129 finish_wait(wq, &wait->wait);
130}
131
132/**
133 * wake_up_glock - Wake up waiters on a glock
134 * @gl: the glock
135 */
136static void wake_up_glock(struct gfs2_glock *gl)
137{
138 wait_queue_head_t *wq = glock_waitqueue(&gl->gl_name);
139
140 if (waitqueue_active(wq))
141 __wake_up(wq, TASK_NORMAL, 1, &gl->gl_name);
142}
143
83static void gfs2_glock_dealloc(struct rcu_head *rcu) 144static void gfs2_glock_dealloc(struct rcu_head *rcu)
84{ 145{
85 struct gfs2_glock *gl = container_of(rcu, struct gfs2_glock, gl_rcu); 146 struct gfs2_glock *gl = container_of(rcu, struct gfs2_glock, gl_rcu);
@@ -96,6 +157,9 @@ void gfs2_glock_free(struct gfs2_glock *gl)
96{ 157{
97 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; 158 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
98 159
160 rhashtable_remove_fast(&gl_hash_table, &gl->gl_node, ht_parms);
161 smp_mb();
162 wake_up_glock(gl);
99 call_rcu(&gl->gl_rcu, gfs2_glock_dealloc); 163 call_rcu(&gl->gl_rcu, gfs2_glock_dealloc);
100 if (atomic_dec_and_test(&sdp->sd_glock_disposal)) 164 if (atomic_dec_and_test(&sdp->sd_glock_disposal))
101 wake_up(&sdp->sd_glock_wait); 165 wake_up(&sdp->sd_glock_wait);
@@ -194,7 +258,6 @@ static void __gfs2_glock_put(struct gfs2_glock *gl)
194 258
195 gfs2_glock_remove_from_lru(gl); 259 gfs2_glock_remove_from_lru(gl);
196 spin_unlock(&gl->gl_lockref.lock); 260 spin_unlock(&gl->gl_lockref.lock);
197 rhashtable_remove_fast(&gl_hash_table, &gl->gl_node, ht_parms);
198 GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders)); 261 GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
199 GLOCK_BUG_ON(gl, mapping && mapping->nrpages); 262 GLOCK_BUG_ON(gl, mapping && mapping->nrpages);
200 trace_gfs2_glock_put(gl); 263 trace_gfs2_glock_put(gl);
@@ -679,6 +742,36 @@ static void glock_work_func(struct work_struct *work)
679 spin_unlock(&gl->gl_lockref.lock); 742 spin_unlock(&gl->gl_lockref.lock);
680} 743}
681 744
745static struct gfs2_glock *find_insert_glock(struct lm_lockname *name,
746 struct gfs2_glock *new)
747{
748 struct wait_glock_queue wait;
749 wait_queue_head_t *wq;
750 struct gfs2_glock *gl;
751
752again:
753 prepare_to_wait_on_glock(&wq, &wait, name);
754 rcu_read_lock();
755 if (new) {
756 gl = rhashtable_lookup_get_insert_fast(&gl_hash_table,
757 &new->gl_node, ht_parms);
758 if (IS_ERR(gl))
759 goto out;
760 } else {
761 gl = rhashtable_lookup_fast(&gl_hash_table,
762 name, ht_parms);
763 }
764 if (gl && !lockref_get_not_dead(&gl->gl_lockref)) {
765 rcu_read_unlock();
766 schedule();
767 goto again;
768 }
769out:
770 rcu_read_unlock();
771 finish_wait_on_glock(wq, &wait);
772 return gl;
773}
774
682/** 775/**
683 * gfs2_glock_get() - Get a glock, or create one if one doesn't exist 776 * gfs2_glock_get() - Get a glock, or create one if one doesn't exist
684 * @sdp: The GFS2 superblock 777 * @sdp: The GFS2 superblock
@@ -705,15 +798,11 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
705 struct kmem_cache *cachep; 798 struct kmem_cache *cachep;
706 int ret = 0; 799 int ret = 0;
707 800
708 rcu_read_lock(); 801 gl = find_insert_glock(&name, NULL);
709 gl = rhashtable_lookup_fast(&gl_hash_table, &name, ht_parms); 802 if (gl) {
710 if (gl && !lockref_get_not_dead(&gl->gl_lockref)) 803 *glp = gl;
711 gl = NULL;
712 rcu_read_unlock();
713
714 *glp = gl;
715 if (gl)
716 return 0; 804 return 0;
805 }
717 if (!create) 806 if (!create)
718 return -ENOENT; 807 return -ENOENT;
719 808
@@ -767,10 +856,7 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
767 mapping->writeback_index = 0; 856 mapping->writeback_index = 0;
768 } 857 }
769 858
770again: 859 tmp = find_insert_glock(&name, gl);
771 rcu_read_lock();
772 tmp = rhashtable_lookup_get_insert_fast(&gl_hash_table, &gl->gl_node,
773 ht_parms);
774 if (!tmp) { 860 if (!tmp) {
775 *glp = gl; 861 *glp = gl;
776 goto out; 862 goto out;
@@ -779,13 +865,7 @@ again:
779 ret = PTR_ERR(tmp); 865 ret = PTR_ERR(tmp);
780 goto out_free; 866 goto out_free;
781 } 867 }
782 if (lockref_get_not_dead(&tmp->gl_lockref)) { 868 *glp = tmp;
783 *glp = tmp;
784 goto out_free;
785 }
786 rcu_read_unlock();
787 cond_resched();
788 goto again;
789 869
790out_free: 870out_free:
791 kfree(gl->gl_lksb.sb_lvbptr); 871 kfree(gl->gl_lksb.sb_lvbptr);
@@ -793,7 +873,6 @@ out_free:
793 atomic_dec(&sdp->sd_glock_disposal); 873 atomic_dec(&sdp->sd_glock_disposal);
794 874
795out: 875out:
796 rcu_read_unlock();
797 return ret; 876 return ret;
798} 877}
799 878
@@ -1806,7 +1885,7 @@ static int gfs2_sbstats_seq_show(struct seq_file *seq, void *iter_ptr)
1806 1885
1807int __init gfs2_glock_init(void) 1886int __init gfs2_glock_init(void)
1808{ 1887{
1809 int ret; 1888 int i, ret;
1810 1889
1811 ret = rhashtable_init(&gl_hash_table, &ht_parms); 1890 ret = rhashtable_init(&gl_hash_table, &ht_parms);
1812 if (ret < 0) 1891 if (ret < 0)
@@ -1835,6 +1914,9 @@ int __init gfs2_glock_init(void)
1835 return ret; 1914 return ret;
1836 } 1915 }
1837 1916
1917 for (i = 0; i < GLOCK_WAIT_TABLE_SIZE; i++)
1918 init_waitqueue_head(glock_wait_table + i);
1919
1838 return 0; 1920 return 0;
1839} 1921}
1840 1922