author    Steven Whitehouse <swhiteho@redhat.com>  2006-09-07 14:40:21 -0400
committer Steven Whitehouse <swhiteho@redhat.com>  2006-09-07 14:40:21 -0400
commit    85d1da67f7e1239afa3494d05be87da6fc3ecada (patch)
tree      01508570249764d8b0e38183e1ea7e9666b34b78 /fs
parent    b8547856f9c158ff70effbcfd15969c908fbe1b3 (diff)
[GFS2] Move glock hash table out of superblock
There are several reasons why we want to do this:

- Firstly, it's large, so we'll scale better with multiple GFS2 filesystems mounted at the same time.
- Secondly, it's easier to scale its size as required (that's a plan for later patches).
- Thirdly, we can use kzalloc rather than vmalloc when allocating the superblock (it's now only 4888 bytes).
- Fourth, it's all part of my plan to eventually be able to use RCU with the glock hash.

Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
Diffstat (limited to 'fs')
-rw-r--r--  fs/gfs2/glock.c      | 54
-rw-r--r--  fs/gfs2/glock.h      |  2
-rw-r--r--  fs/gfs2/incore.h     | 12
-rw-r--r--  fs/gfs2/main.c       |  6
-rw-r--r--  fs/gfs2/ops_fstype.c |  9
5 files changed, 47 insertions(+), 36 deletions(-)
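Background sketch (not part of the patch): the change below replaces a per-superblock hash with one global hash table shared by every mount, and compensates by tagging each glock with its superblock so per-filesystem walks skip foreign entries (see the new "if (gl->gl_sbd != sdp) continue;" checks). The following minimal userspace C sketch illustrates that idea; every name in it is an illustrative stand-in, not a real GFS2 symbol.

/* Sketch only: one global hash table, entries filtered by owner. */
#include <stdio.h>
#include <stdlib.h>

#define HASH_SIZE 64

struct sbd;                             /* stand-in for struct gfs2_sbd   */

struct glock {                          /* stand-in for struct gfs2_glock */
        struct glock *next;
        struct sbd *owner;              /* which mount this entry belongs to */
        unsigned long long number;
};

struct bucket {                         /* stand-in for gfs2_gl_hash_bucket */
        struct glock *head;
};

/* Global, shared by all mounts -- sized once, not embedded in each superblock. */
static struct bucket hash_table[HASH_SIZE];

struct sbd { const char *name; };

static unsigned int hash(unsigned long long number)
{
        return (unsigned int)(number % HASH_SIZE);
}

static void insert(struct sbd *sdp, unsigned long long number)
{
        struct bucket *b = &hash_table[hash(number)];
        struct glock *gl = calloc(1, sizeof(*gl));

        if (!gl)
                return;
        gl->owner = sdp;
        gl->number = number;
        gl->next = b->head;
        b->head = gl;
}

/* Walk one mount's entries only, skipping those of other mounts. */
static void dump(struct sbd *sdp)
{
        for (unsigned int i = 0; i < HASH_SIZE; i++)
                for (struct glock *gl = hash_table[i].head; gl; gl = gl->next) {
                        if (gl->owner != sdp)
                                continue;
                        printf("%s: glock %llu\n", sdp->name, gl->number);
                }
}

int main(void)
{
        struct sbd a = { "fs-a" }, b = { "fs-b" };

        insert(&a, 1);
        insert(&b, 2);
        insert(&a, 65); /* 1 and 65 land in the same bucket */
        dump(&a);
        dump(&b);
        return 0;
}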
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 00769674f2ea..5759f52a1cf9 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -49,6 +49,8 @@ typedef void (*glock_examiner) (struct gfs2_glock * gl);
 static int gfs2_dump_lockstate(struct gfs2_sbd *sdp);
 static int dump_glock(struct gfs2_glock *gl);
 
+static struct gfs2_gl_hash_bucket gl_hash_table[GFS2_GL_HASH_SIZE];
+
 /**
  * relaxed_state_ok - is a requested lock compatible with the current lock mode?
  * @actual: the current state of the lock
@@ -231,10 +233,10 @@ static struct gfs2_glock *search_bucket(struct gfs2_gl_hash_bucket *bucket,
  * Returns: NULL, or the struct gfs2_glock with the requested number
  */
 
-static struct gfs2_glock *gfs2_glock_find(struct gfs2_sbd *sdp,
+static struct gfs2_glock *gfs2_glock_find(const struct gfs2_sbd *sdp,
                                          const struct lm_lockname *name)
 {
-        struct gfs2_gl_hash_bucket *bucket = &sdp->sd_gl_hash[gl_hash(sdp, name)];
+        struct gfs2_gl_hash_bucket *bucket = &gl_hash_table[gl_hash(sdp, name)];
         struct gfs2_glock *gl;
 
         read_lock(&bucket->hb_lock);
@@ -268,7 +270,7 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
 
         name.ln_number = number;
         name.ln_type = glops->go_type;
-        bucket = &sdp->sd_gl_hash[gl_hash(sdp, &name)];
+        bucket = &gl_hash_table[gl_hash(sdp, &name)];
 
         read_lock(&bucket->hb_lock);
         gl = search_bucket(bucket, sdp, &name);
@@ -648,9 +650,9 @@ static void gfs2_glmutex_lock(struct gfs2_glock *gl)
         set_bit(HIF_MUTEX, &gh.gh_iflags);
 
         spin_lock(&gl->gl_spin);
-        if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
+        if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
                 list_add_tail(&gh.gh_list, &gl->gl_waiters1);
-        else {
+        } else {
                 gl->gl_owner = current;
                 gl->gl_ip = (unsigned long)__builtin_return_address(0);
                 complete(&gh.gh_wait);
@@ -673,9 +675,9 @@ static int gfs2_glmutex_trylock(struct gfs2_glock *gl)
         int acquired = 1;
 
         spin_lock(&gl->gl_spin);
-        if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
+        if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
                 acquired = 0;
-        else {
+        } else {
                 gl->gl_owner = current;
                 gl->gl_ip = (unsigned long)__builtin_return_address(0);
         }
@@ -830,9 +832,9 @@ static void xmote_bh(struct gfs2_glock *gl, unsigned int ret)
         spin_lock(&gl->gl_spin);
         list_del_init(&gh->gh_list);
         if (gl->gl_state == gh->gh_state ||
-            gl->gl_state == LM_ST_UNLOCKED)
+            gl->gl_state == LM_ST_UNLOCKED) {
                 gh->gh_error = 0;
-        else {
+        } else {
                 if (gfs2_assert_warn(sdp, gh->gh_flags &
                                 (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) == -1)
                         fs_warn(sdp, "ret = 0x%.8X\n", ret);
@@ -1090,8 +1092,7 @@ static int glock_wait_internal(struct gfs2_holder *gh)
                 return gh->gh_error;
 
         gfs2_assert_withdraw(sdp, test_bit(HIF_HOLDER, &gh->gh_iflags));
-        gfs2_assert_withdraw(sdp, relaxed_state_ok(gl->gl_state,
-                                                   gh->gh_state,
+        gfs2_assert_withdraw(sdp, relaxed_state_ok(gl->gl_state, gh->gh_state,
                                                    gh->gh_flags));
 
         if (test_bit(HIF_FIRST, &gh->gh_iflags)) {
@@ -1901,6 +1902,8 @@ static int examine_bucket(glock_examiner examiner, struct gfs2_sbd *sdp,
 
                 if (test_bit(GLF_PLUG, &gl->gl_flags))
                         continue;
+                if (gl->gl_sbd != sdp)
+                        continue;
 
                 /* examiner() must glock_put() */
                 gfs2_glock_hold(gl);
@@ -1953,7 +1956,7 @@ void gfs2_scand_internal(struct gfs2_sbd *sdp)
         unsigned int x;
 
         for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
-                examine_bucket(scan_glock, sdp, &sdp->sd_gl_hash[x]);
+                examine_bucket(scan_glock, sdp, &gl_hash_table[x]);
                 cond_resched();
         }
 }
@@ -2012,7 +2015,7 @@ void gfs2_gl_hash_clear(struct gfs2_sbd *sdp, int wait)
                 cont = 0;
 
                 for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
-                        if (examine_bucket(clear_glock, sdp, &sdp->sd_gl_hash[x]))
+                        if (examine_bucket(clear_glock, sdp, &gl_hash_table[x]))
                                 cont = 1;
 
                 if (!wait || !cont)
@@ -2114,14 +2117,13 @@ static int dump_glock(struct gfs2_glock *gl)
 
         spin_lock(&gl->gl_spin);
 
-        printk(KERN_INFO "Glock 0x%p (%u, %llu)\n",
-               gl,
-               gl->gl_name.ln_type,
+        printk(KERN_INFO "Glock 0x%p (%u, %llu)\n", gl, gl->gl_name.ln_type,
                (unsigned long long)gl->gl_name.ln_number);
         printk(KERN_INFO " gl_flags =");
-        for (x = 0; x < 32; x++)
+        for (x = 0; x < 32; x++) {
                 if (test_bit(x, &gl->gl_flags))
                         printk(" %u", x);
+        }
         printk(" \n");
         printk(KERN_INFO " gl_ref = %d\n", atomic_read(&gl->gl_ref.refcount));
         printk(KERN_INFO " gl_state = %u\n", gl->gl_state);
@@ -2136,8 +2138,7 @@ static int dump_glock(struct gfs2_glock *gl)
         printk(KERN_INFO " reclaim = %s\n",
                (list_empty(&gl->gl_reclaim)) ? "no" : "yes");
         if (gl->gl_aspace)
-                printk(KERN_INFO " aspace = 0x%p nrpages = %lu\n",
-                       gl->gl_aspace,
+                printk(KERN_INFO " aspace = 0x%p nrpages = %lu\n", gl->gl_aspace,
                        gl->gl_aspace->i_mapping->nrpages);
         else
                 printk(KERN_INFO " aspace = no\n");
@@ -2203,13 +2204,15 @@ static int gfs2_dump_lockstate(struct gfs2_sbd *sdp)
         int error = 0;
 
         for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
-                bucket = &sdp->sd_gl_hash[x];
+                bucket = &gl_hash_table[x];
 
                 read_lock(&bucket->hb_lock);
 
                 list_for_each_entry(gl, &bucket->hb_list, gl_list) {
                         if (test_bit(GLF_PLUG, &gl->gl_flags))
                                 continue;
+                        if (gl->gl_sbd != sdp)
+                                continue;
 
                         error = dump_glock(gl);
                         if (error)
@@ -2226,3 +2229,14 @@ static int gfs2_dump_lockstate(struct gfs2_sbd *sdp)
         return error;
 }
 
+int __init gfs2_glock_init(void)
+{
+        unsigned i;
+        for(i = 0; i < GFS2_GL_HASH_SIZE; i++) {
+                struct gfs2_gl_hash_bucket *hb = &gl_hash_table[i];
+                rwlock_init(&hb->hb_lock);
+                INIT_LIST_HEAD(&hb->hb_list);
+        }
+        return 0;
+}
+
diff --git a/fs/gfs2/glock.h b/fs/gfs2/glock.h
index 2e1d32866321..0febca3d6d47 100644
--- a/fs/gfs2/glock.h
+++ b/fs/gfs2/glock.h
@@ -150,4 +150,6 @@ void gfs2_reclaim_glock(struct gfs2_sbd *sdp);
 void gfs2_scand_internal(struct gfs2_sbd *sdp);
 void gfs2_gl_hash_clear(struct gfs2_sbd *sdp, int wait);
 
+int __init gfs2_glock_init(void);
+
 #endif /* __GLOCK_DOT_H__ */
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index 225924ca6b3e..61849607211f 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -30,7 +30,6 @@ struct gfs2_quota_data;
 struct gfs2_trans;
 struct gfs2_ail;
 struct gfs2_jdesc;
-struct gfs2_gl_hash_bucket;
 struct gfs2_sbd;
 
 typedef void (*gfs2_glop_bh_t) (struct gfs2_glock *gl, unsigned int ret);
@@ -107,6 +106,11 @@ struct gfs2_bufdata {
         struct list_head bd_ail_gl_list;
 };
 
+struct gfs2_gl_hash_bucket {
+        rwlock_t hb_lock;
+        struct list_head hb_list;
+};
+
 struct gfs2_glock_operations {
         void (*go_xmote_th) (struct gfs2_glock * gl, unsigned int state,
                              int flags);
@@ -442,11 +446,6 @@ struct gfs2_tune {
         unsigned int gt_statfs_slow;
 };
 
-struct gfs2_gl_hash_bucket {
-        rwlock_t hb_lock;
-        struct list_head hb_list;
-};
-
 enum {
         SDF_JOURNAL_CHECKED = 0,
         SDF_JOURNAL_LIVE = 1,
@@ -489,7 +488,6 @@ struct gfs2_sbd {
         /* Lock Stuff */
 
         struct lm_lockstruct sd_lockstruct;
-        struct gfs2_gl_hash_bucket sd_gl_hash[GFS2_GL_HASH_SIZE];
         struct list_head sd_reclaim_list;
         spinlock_t sd_reclaim_lock;
         wait_queue_head_t sd_reclaim_wq;
diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c
index 143fda727a9c..2bdf246436c7 100644
--- a/fs/gfs2/main.c
+++ b/fs/gfs2/main.c
@@ -23,6 +23,7 @@
 #include "ops_fstype.h"
 #include "sys.h"
 #include "util.h"
+#include "glock.h"
 
 static void gfs2_init_inode_once(void *foo, kmem_cache_t *cachep, unsigned long flags)
 {
@@ -69,8 +70,11 @@ static int __init init_gfs2_fs(void)
         if (error)
                 return error;
 
-        error = -ENOMEM;
+        error = gfs2_glock_init();
+        if (error)
+                goto fail;
 
+        error = -ENOMEM;
         gfs2_glock_cachep = kmem_cache_create("gfs2_glock",
                                               sizeof(struct gfs2_glock),
                                               0, 0,
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index c94422b30ceb..f5140bdc1027 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -45,23 +45,16 @@ extern struct dentry_operations gfs2_dops;
 static struct gfs2_sbd *init_sbd(struct super_block *sb)
 {
         struct gfs2_sbd *sdp;
-        unsigned int x;
 
-        sdp = vmalloc(sizeof(struct gfs2_sbd));
+        sdp = kzalloc(sizeof(struct gfs2_sbd), GFP_KERNEL);
         if (!sdp)
                 return NULL;
 
-        memset(sdp, 0, sizeof(struct gfs2_sbd));
-
         sb->s_fs_info = sdp;
         sdp->sd_vfs = sb;
 
         gfs2_tune_init(&sdp->sd_tune);
 
-        for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
-                rwlock_init(&sdp->sd_gl_hash[x].hb_lock);
-                INIT_LIST_HEAD(&sdp->sd_gl_hash[x].hb_list);
-        }
         INIT_LIST_HEAD(&sdp->sd_reclaim_list);
         spin_lock_init(&sdp->sd_reclaim_lock);
         init_waitqueue_head(&sdp->sd_reclaim_wq);