aboutsummaryrefslogtreecommitdiffstats
path: root/fs/gfs2/glock.c
diff options
context:
space:
mode:
authorSteven Whitehouse <swhiteho@redhat.com>2006-09-07 14:40:21 -0400
committerSteven Whitehouse <swhiteho@redhat.com>2006-09-07 14:40:21 -0400
commit85d1da67f7e1239afa3494d05be87da6fc3ecada (patch)
tree01508570249764d8b0e38183e1ea7e9666b34b78 /fs/gfs2/glock.c
parentb8547856f9c158ff70effbcfd15969c908fbe1b3 (diff)
[GFS2] Move glock hash table out of superblock
There are several reasons why we want to do this: - Firstly, it's large and thus we'll scale better with multiple GFS2 filesystems mounted at the same time - Secondly, it's easier to scale its size as required (that's a plan for later patches) - Thirdly, we can use kzalloc rather than vmalloc when allocating the superblock (it's now only 4888 bytes) - Fourthly, it's all part of my plan to eventually be able to use RCU with the glock hash. Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
Diffstat (limited to 'fs/gfs2/glock.c')
-rw-r--r--fs/gfs2/glock.c54
1 file changed, 34 insertions, 20 deletions
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 00769674f2ea..5759f52a1cf9 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -49,6 +49,8 @@ typedef void (*glock_examiner) (struct gfs2_glock * gl);
49static int gfs2_dump_lockstate(struct gfs2_sbd *sdp); 49static int gfs2_dump_lockstate(struct gfs2_sbd *sdp);
50static int dump_glock(struct gfs2_glock *gl); 50static int dump_glock(struct gfs2_glock *gl);
51 51
52static struct gfs2_gl_hash_bucket gl_hash_table[GFS2_GL_HASH_SIZE];
53
52/** 54/**
53 * relaxed_state_ok - is a requested lock compatible with the current lock mode? 55 * relaxed_state_ok - is a requested lock compatible with the current lock mode?
54 * @actual: the current state of the lock 56 * @actual: the current state of the lock
@@ -231,10 +233,10 @@ static struct gfs2_glock *search_bucket(struct gfs2_gl_hash_bucket *bucket,
231 * Returns: NULL, or the struct gfs2_glock with the requested number 233 * Returns: NULL, or the struct gfs2_glock with the requested number
232 */ 234 */
233 235
234static struct gfs2_glock *gfs2_glock_find(struct gfs2_sbd *sdp, 236static struct gfs2_glock *gfs2_glock_find(const struct gfs2_sbd *sdp,
235 const struct lm_lockname *name) 237 const struct lm_lockname *name)
236{ 238{
237 struct gfs2_gl_hash_bucket *bucket = &sdp->sd_gl_hash[gl_hash(sdp, name)]; 239 struct gfs2_gl_hash_bucket *bucket = &gl_hash_table[gl_hash(sdp, name)];
238 struct gfs2_glock *gl; 240 struct gfs2_glock *gl;
239 241
240 read_lock(&bucket->hb_lock); 242 read_lock(&bucket->hb_lock);
@@ -268,7 +270,7 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
268 270
269 name.ln_number = number; 271 name.ln_number = number;
270 name.ln_type = glops->go_type; 272 name.ln_type = glops->go_type;
271 bucket = &sdp->sd_gl_hash[gl_hash(sdp, &name)]; 273 bucket = &gl_hash_table[gl_hash(sdp, &name)];
272 274
273 read_lock(&bucket->hb_lock); 275 read_lock(&bucket->hb_lock);
274 gl = search_bucket(bucket, sdp, &name); 276 gl = search_bucket(bucket, sdp, &name);
@@ -648,9 +650,9 @@ static void gfs2_glmutex_lock(struct gfs2_glock *gl)
648 set_bit(HIF_MUTEX, &gh.gh_iflags); 650 set_bit(HIF_MUTEX, &gh.gh_iflags);
649 651
650 spin_lock(&gl->gl_spin); 652 spin_lock(&gl->gl_spin);
651 if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) 653 if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
652 list_add_tail(&gh.gh_list, &gl->gl_waiters1); 654 list_add_tail(&gh.gh_list, &gl->gl_waiters1);
653 else { 655 } else {
654 gl->gl_owner = current; 656 gl->gl_owner = current;
655 gl->gl_ip = (unsigned long)__builtin_return_address(0); 657 gl->gl_ip = (unsigned long)__builtin_return_address(0);
656 complete(&gh.gh_wait); 658 complete(&gh.gh_wait);
@@ -673,9 +675,9 @@ static int gfs2_glmutex_trylock(struct gfs2_glock *gl)
673 int acquired = 1; 675 int acquired = 1;
674 676
675 spin_lock(&gl->gl_spin); 677 spin_lock(&gl->gl_spin);
676 if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) 678 if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
677 acquired = 0; 679 acquired = 0;
678 else { 680 } else {
679 gl->gl_owner = current; 681 gl->gl_owner = current;
680 gl->gl_ip = (unsigned long)__builtin_return_address(0); 682 gl->gl_ip = (unsigned long)__builtin_return_address(0);
681 } 683 }
@@ -830,9 +832,9 @@ static void xmote_bh(struct gfs2_glock *gl, unsigned int ret)
830 spin_lock(&gl->gl_spin); 832 spin_lock(&gl->gl_spin);
831 list_del_init(&gh->gh_list); 833 list_del_init(&gh->gh_list);
832 if (gl->gl_state == gh->gh_state || 834 if (gl->gl_state == gh->gh_state ||
833 gl->gl_state == LM_ST_UNLOCKED) 835 gl->gl_state == LM_ST_UNLOCKED) {
834 gh->gh_error = 0; 836 gh->gh_error = 0;
835 else { 837 } else {
836 if (gfs2_assert_warn(sdp, gh->gh_flags & 838 if (gfs2_assert_warn(sdp, gh->gh_flags &
837 (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) == -1) 839 (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) == -1)
838 fs_warn(sdp, "ret = 0x%.8X\n", ret); 840 fs_warn(sdp, "ret = 0x%.8X\n", ret);
@@ -1090,8 +1092,7 @@ static int glock_wait_internal(struct gfs2_holder *gh)
1090 return gh->gh_error; 1092 return gh->gh_error;
1091 1093
1092 gfs2_assert_withdraw(sdp, test_bit(HIF_HOLDER, &gh->gh_iflags)); 1094 gfs2_assert_withdraw(sdp, test_bit(HIF_HOLDER, &gh->gh_iflags));
1093 gfs2_assert_withdraw(sdp, relaxed_state_ok(gl->gl_state, 1095 gfs2_assert_withdraw(sdp, relaxed_state_ok(gl->gl_state, gh->gh_state,
1094 gh->gh_state,
1095 gh->gh_flags)); 1096 gh->gh_flags));
1096 1097
1097 if (test_bit(HIF_FIRST, &gh->gh_iflags)) { 1098 if (test_bit(HIF_FIRST, &gh->gh_iflags)) {
@@ -1901,6 +1902,8 @@ static int examine_bucket(glock_examiner examiner, struct gfs2_sbd *sdp,
1901 1902
1902 if (test_bit(GLF_PLUG, &gl->gl_flags)) 1903 if (test_bit(GLF_PLUG, &gl->gl_flags))
1903 continue; 1904 continue;
1905 if (gl->gl_sbd != sdp)
1906 continue;
1904 1907
1905 /* examiner() must glock_put() */ 1908 /* examiner() must glock_put() */
1906 gfs2_glock_hold(gl); 1909 gfs2_glock_hold(gl);
@@ -1953,7 +1956,7 @@ void gfs2_scand_internal(struct gfs2_sbd *sdp)
1953 unsigned int x; 1956 unsigned int x;
1954 1957
1955 for (x = 0; x < GFS2_GL_HASH_SIZE; x++) { 1958 for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
1956 examine_bucket(scan_glock, sdp, &sdp->sd_gl_hash[x]); 1959 examine_bucket(scan_glock, sdp, &gl_hash_table[x]);
1957 cond_resched(); 1960 cond_resched();
1958 } 1961 }
1959} 1962}
@@ -2012,7 +2015,7 @@ void gfs2_gl_hash_clear(struct gfs2_sbd *sdp, int wait)
2012 cont = 0; 2015 cont = 0;
2013 2016
2014 for (x = 0; x < GFS2_GL_HASH_SIZE; x++) 2017 for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
2015 if (examine_bucket(clear_glock, sdp, &sdp->sd_gl_hash[x])) 2018 if (examine_bucket(clear_glock, sdp, &gl_hash_table[x]))
2016 cont = 1; 2019 cont = 1;
2017 2020
2018 if (!wait || !cont) 2021 if (!wait || !cont)
@@ -2114,14 +2117,13 @@ static int dump_glock(struct gfs2_glock *gl)
2114 2117
2115 spin_lock(&gl->gl_spin); 2118 spin_lock(&gl->gl_spin);
2116 2119
2117 printk(KERN_INFO "Glock 0x%p (%u, %llu)\n", 2120 printk(KERN_INFO "Glock 0x%p (%u, %llu)\n", gl, gl->gl_name.ln_type,
2118 gl,
2119 gl->gl_name.ln_type,
2120 (unsigned long long)gl->gl_name.ln_number); 2121 (unsigned long long)gl->gl_name.ln_number);
2121 printk(KERN_INFO " gl_flags ="); 2122 printk(KERN_INFO " gl_flags =");
2122 for (x = 0; x < 32; x++) 2123 for (x = 0; x < 32; x++) {
2123 if (test_bit(x, &gl->gl_flags)) 2124 if (test_bit(x, &gl->gl_flags))
2124 printk(" %u", x); 2125 printk(" %u", x);
2126 }
2125 printk(" \n"); 2127 printk(" \n");
2126 printk(KERN_INFO " gl_ref = %d\n", atomic_read(&gl->gl_ref.refcount)); 2128 printk(KERN_INFO " gl_ref = %d\n", atomic_read(&gl->gl_ref.refcount));
2127 printk(KERN_INFO " gl_state = %u\n", gl->gl_state); 2129 printk(KERN_INFO " gl_state = %u\n", gl->gl_state);
@@ -2136,8 +2138,7 @@ static int dump_glock(struct gfs2_glock *gl)
2136 printk(KERN_INFO " reclaim = %s\n", 2138 printk(KERN_INFO " reclaim = %s\n",
2137 (list_empty(&gl->gl_reclaim)) ? "no" : "yes"); 2139 (list_empty(&gl->gl_reclaim)) ? "no" : "yes");
2138 if (gl->gl_aspace) 2140 if (gl->gl_aspace)
2139 printk(KERN_INFO " aspace = 0x%p nrpages = %lu\n", 2141 printk(KERN_INFO " aspace = 0x%p nrpages = %lu\n", gl->gl_aspace,
2140 gl->gl_aspace,
2141 gl->gl_aspace->i_mapping->nrpages); 2142 gl->gl_aspace->i_mapping->nrpages);
2142 else 2143 else
2143 printk(KERN_INFO " aspace = no\n"); 2144 printk(KERN_INFO " aspace = no\n");
@@ -2203,13 +2204,15 @@ static int gfs2_dump_lockstate(struct gfs2_sbd *sdp)
2203 int error = 0; 2204 int error = 0;
2204 2205
2205 for (x = 0; x < GFS2_GL_HASH_SIZE; x++) { 2206 for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
2206 bucket = &sdp->sd_gl_hash[x]; 2207 bucket = &gl_hash_table[x];
2207 2208
2208 read_lock(&bucket->hb_lock); 2209 read_lock(&bucket->hb_lock);
2209 2210
2210 list_for_each_entry(gl, &bucket->hb_list, gl_list) { 2211 list_for_each_entry(gl, &bucket->hb_list, gl_list) {
2211 if (test_bit(GLF_PLUG, &gl->gl_flags)) 2212 if (test_bit(GLF_PLUG, &gl->gl_flags))
2212 continue; 2213 continue;
2214 if (gl->gl_sbd != sdp)
2215 continue;
2213 2216
2214 error = dump_glock(gl); 2217 error = dump_glock(gl);
2215 if (error) 2218 if (error)
@@ -2226,3 +2229,14 @@ static int gfs2_dump_lockstate(struct gfs2_sbd *sdp)
2226 return error; 2229 return error;
2227} 2230}
2228 2231
2232int __init gfs2_glock_init(void)
2233{
2234 unsigned i;
2235 for(i = 0; i < GFS2_GL_HASH_SIZE; i++) {
2236 struct gfs2_gl_hash_bucket *hb = &gl_hash_table[i];
2237 rwlock_init(&hb->hb_lock);
2238 INIT_LIST_HEAD(&hb->hb_list);
2239 }
2240 return 0;
2241}
2242