author		Steven Whitehouse <swhiteho@redhat.com>	2006-09-12 10:10:01 -0400
committer	Steven Whitehouse <swhiteho@redhat.com>	2006-09-12 10:10:01 -0400
commit		b6397893a5ed81970e803d61ee2f1a0e79f87438 (patch)
tree		88b1f9fe213b70f0c4d96583bb40601d9cfc446d /fs/gfs2
parent		24264434603cc102d71fb2a1b3b7e282a781f449 (diff)
[GFS2] Use hlist for glock hash chains
This results in smaller list heads, so that we can have more chains in
the same amount of memory (twice as many). I've multiplied the size of
the table by four though - this is because we are saving memory by not
having one lock per chain any more. So we end up using about the same
amount of memory for the hash table as we did before I started these
changes, the difference being that we now have four times as many hash
chains.

The reason that I say "about the same amount of memory" is that the
actual amount now depends upon NR_CPUS and some of the config
variables, so it's not exact, and in some cases we do use more memory.
Eventually we might want to scale the hash table size according to the
size of physical RAM as measured on module load.

Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
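For context on the space saving described above, here is a minimal sketch
(not part of the patch) of the two list head types as defined in
<linux/list.h>; the sizes in the comments assume 64-bit pointers:

/*
 * A list_head is doubly linked, so each hash bucket head costs two
 * pointers; an hlist_head holds only a pointer to the first node, so
 * the same bucket-array memory can hold twice as many chains.
 */
struct list_head {
	struct list_head *next, *prev;		/* 16 bytes per bucket head */
};

struct hlist_head {
	struct hlist_node *first;		/* 8 bytes per bucket head */
};

struct hlist_node {
	struct hlist_node *next, **pprev;	/* lives in each entry, not the bucket */
};

The per-entry cost is unchanged (struct hlist_node is still two
pointer-sized fields inside each glock), so the saving applies only to
the bucket array, which is exactly what GFS2_GL_HASH_SIZE multiplies.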
Diffstat (limited to 'fs/gfs2')
-rw-r--r--	fs/gfs2/glock.c		28
-rw-r--r--	fs/gfs2/incore.h	2
-rw-r--r--	fs/gfs2/main.c		2
3 files changed, 18 insertions, 14 deletions
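The glock.c hunks below also switch the bucket walks over to
hlist_for_each_entry(), which in this kernel era takes a separate
struct hlist_node cursor; a minimal, illustrative sketch of the pattern
(names taken from the patch, not additional code from it):

struct gfs2_glock *gl;
struct hlist_node *h;

/* walk every glock chained into one hash bucket */
hlist_for_each_entry(gl, h, &gl_hash_table[hash].hb_list, gl_list) {
	if (gl->gl_sbd != sdp)
		continue;
	/* ... examine gl ... */
}

This extra cursor is why search_bucket() and gfs2_dump_lockstate()
each gain a "struct hlist_node *h" local in the hunks that follow.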
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index b5effb9e4a38..cf54b0b001fd 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -40,7 +40,7 @@ struct greedy {
 };
 
 struct gfs2_gl_hash_bucket {
-	struct list_head hb_list;
+	struct hlist_head hb_list;
 };
 
 typedef void (*glock_examiner) (struct gfs2_glock * gl);
@@ -49,7 +49,7 @@ static int gfs2_dump_lockstate(struct gfs2_sbd *sdp);
 static int dump_glock(struct gfs2_glock *gl);
 static int dump_inode(struct gfs2_inode *ip);
 
-#define GFS2_GL_HASH_SHIFT	13
+#define GFS2_GL_HASH_SHIFT	15
 #define GFS2_GL_HASH_SIZE	(1 << GFS2_GL_HASH_SHIFT)
 #define GFS2_GL_HASH_MASK	(GFS2_GL_HASH_SIZE - 1)
 
@@ -210,7 +210,7 @@ int gfs2_glock_put(struct gfs2_glock *gl)
 
 	write_lock(gl_lock_addr(gl->gl_hash));
 	if (kref_put(&gl->gl_ref, kill_glock)) {
-		list_del_init(&gl->gl_list);
+		hlist_del(&gl->gl_list);
 		write_unlock(gl_lock_addr(gl->gl_hash));
 		BUG_ON(spin_is_locked(&gl->gl_spin));
 		glock_free(gl);
@@ -259,8 +259,9 @@ static struct gfs2_glock *search_bucket(unsigned int hash,
 					const struct lm_lockname *name)
 {
 	struct gfs2_glock *gl;
+	struct hlist_node *h;
 
-	list_for_each_entry(gl, &gl_hash_table[hash].hb_list, gl_list) {
+	hlist_for_each_entry(gl, h, &gl_hash_table[hash].hb_list, gl_list) {
 		if (!lm_name_equal(&gl->gl_name, name))
 			continue;
 		if (gl->gl_sbd != sdp)
@@ -368,7 +369,7 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
 		glock_free(gl);
 		gl = tmp;
 	} else {
-		list_add_tail(&gl->gl_list, &gl_hash_table[hash].hb_list);
+		hlist_add_head(&gl->gl_list, &gl_hash_table[hash].hb_list);
 		write_unlock(gl_lock_addr(hash));
 	}
 
@@ -1895,15 +1896,15 @@ static int examine_bucket(glock_examiner examiner, struct gfs2_sbd *sdp,
 {
 	struct gfs2_glock *gl, *prev = NULL;
 	int has_entries = 0;
-	struct list_head *head = &gl_hash_table[hash].hb_list;
+	struct hlist_head *head = &gl_hash_table[hash].hb_list;
 
 	read_lock(gl_lock_addr(hash));
-	/* Can't use list_for_each_entry - don't want prefetch here */
-	if (list_empty(head))
+	/* Can't use hlist_for_each_entry - don't want prefetch here */
+	if (hlist_empty(head))
 		goto out;
 	has_entries = 1;
-	gl = list_entry(head->next, struct gfs2_glock, gl_list);
-	while(&gl->gl_list != head) {
+	gl = list_entry(head->first, struct gfs2_glock, gl_list);
+	while(1) {
 		if (gl->gl_sbd == sdp) {
 			gfs2_glock_hold(gl);
 			read_unlock(gl_lock_addr(hash));
@@ -1913,6 +1914,8 @@ static int examine_bucket(glock_examiner examiner, struct gfs2_sbd *sdp,
 			examiner(gl);
 			read_lock(gl_lock_addr(hash));
 		}
+		if (gl->gl_list.next == NULL)
+			break;
 		gl = list_entry(gl->gl_list.next, struct gfs2_glock, gl_list);
 	}
 out:
@@ -2195,6 +2198,7 @@ out:
 static int gfs2_dump_lockstate(struct gfs2_sbd *sdp)
 {
 	struct gfs2_glock *gl;
+	struct hlist_node *h;
 	unsigned int x;
 	int error = 0;
 
@@ -2202,7 +2206,7 @@ static int gfs2_dump_lockstate(struct gfs2_sbd *sdp)
 
 		read_lock(gl_lock_addr(x));
 
-		list_for_each_entry(gl, &gl_hash_table[x].hb_list, gl_list) {
+		hlist_for_each_entry(gl, h, &gl_hash_table[x].hb_list, gl_list) {
 			if (gl->gl_sbd != sdp)
 				continue;
 
@@ -2225,7 +2229,7 @@ int __init gfs2_glock_init(void)
 {
 	unsigned i;
 	for(i = 0; i < GFS2_GL_HASH_SIZE; i++) {
-		INIT_LIST_HEAD(&gl_hash_table[i].hb_list);
+		INIT_HLIST_HEAD(&gl_hash_table[i].hb_list);
 	}
 #ifdef GL_HASH_LOCK_SZ
 	for(i = 0; i < GL_HASH_LOCK_SZ; i++) {
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index 1fed8d1abae5..c68d39271ede 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -161,7 +161,7 @@ enum {
 };
 
 struct gfs2_glock {
-	struct list_head gl_list;
+	struct hlist_node gl_list;
 	unsigned long gl_flags;		/* GLF_... */
 	struct lm_lockname gl_name;
 	struct kref gl_ref;
diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c
index 2bdf246436c7..d2867988cc34 100644
--- a/fs/gfs2/main.c
+++ b/fs/gfs2/main.c
@@ -42,7 +42,7 @@ static void gfs2_init_glock_once(void *foo, kmem_cache_t *cachep, unsigned long
 	struct gfs2_glock *gl = foo;
 	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
 	    SLAB_CTOR_CONSTRUCTOR) {
-		INIT_LIST_HEAD(&gl->gl_list);
+		INIT_HLIST_NODE(&gl->gl_list);
 		spin_lock_init(&gl->gl_spin);
 		INIT_LIST_HEAD(&gl->gl_holders);
 		INIT_LIST_HEAD(&gl->gl_waiters1);