author    Steven Whitehouse <swhiteho@redhat.com>    2006-08-30 10:36:52 -0400
committer Steven Whitehouse <swhiteho@redhat.com>    2006-08-30 10:36:52 -0400
commit    ec45d9f583b3663f90a7c5c559fd13e6e4c56ad5 (patch)
tree      8248788ed2d2f1cb0ffc6b7180da3366b6773d62 /fs/gfs2
parent    5e2b0613ed9f9641937dd5948051631249447c57 (diff)
[GFS2] Use slab properly with glocks
We can take advantage of the slab allocator to ensure that all the list
heads and the spinlock (plus one or two other fields) are initialised by
slab to speed up allocation of glocks.

Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
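For reference, below is a minimal sketch of the slab-constructor pattern this patch applies, written against the 2.6.18-era SLAB API used in this tree (the six-argument kmem_cache_create() taking constructor/destructor callbacks, and the SLAB_CTOR_* flags). The struct, cache and function names in the sketch are illustrative only and are not part of GFS2:

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>

/* Illustrative object: some fields are constructor invariants, one is
 * per-use state. (Hypothetical example, not a GFS2 structure.) */
struct foo {
	struct list_head f_list;	/* invariant: empty when the object is free */
	spinlock_t f_lock;		/* invariant: initialised once */
	int f_state;			/* per-use, reset on every allocation */
};

static kmem_cache_t *foo_cachep;

/* Runs once per object when slab constructs a fresh page of objects,
 * not on every kmem_cache_alloc(), so this work is amortised. */
static void foo_init_once(void *obj, kmem_cache_t *cachep,
			  unsigned long flags)
{
	struct foo *f = obj;

	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
	    SLAB_CTOR_CONSTRUCTOR) {
		INIT_LIST_HEAD(&f->f_list);
		spin_lock_init(&f->f_lock);
	}
}

static struct foo *foo_alloc(void)
{
	struct foo *f = kmem_cache_alloc(foo_cachep, GFP_KERNEL);

	/* Constructed fields are already valid here; only per-use state
	 * needs setting. Objects must be returned to the cache with the
	 * constructor's invariants restored (e.g. lists emptied again). */
	if (f)
		f->f_state = 0;
	return f;
}

static int __init foo_init(void)
{
	/* 2.6.18-era signature: name, size, align, flags, ctor, dtor. */
	foo_cachep = kmem_cache_create("foo_cache", sizeof(struct foo),
				       0, 0, foo_init_once, NULL);
	if (!foo_cachep)
		return -ENOMEM;
	return 0;
}

static void __exit foo_exit(void)
{
	kmem_cache_destroy(foo_cachep);
}

module_init(foo_init);
module_exit(foo_exit);
MODULE_LICENSE("GPL");

The constraint behind the split is that whatever the constructor sets up must still hold when an object is handed back with kmem_cache_free(); that is why only the list heads, the spinlock and the two atomic counters move into gfs2_init_glock_once() in this patch, while per-glock fields such as gl_name and gl_state are still set on every allocation in gfs2_glock_get().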
Diffstat (limited to 'fs/gfs2')
-rw-r--r--    fs/gfs2/glock.c    39
-rw-r--r--    fs/gfs2/main.c     23
2 files changed, 34 insertions, 28 deletions
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index c3dde8560827..4a2e90dc1d02 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -278,31 +278,22 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, uint64_t number,
 	if (!gl)
 		return -ENOMEM;
 
-	memset(gl, 0, sizeof(struct gfs2_glock));
-
-	INIT_LIST_HEAD(&gl->gl_list);
+	gl->gl_flags = 0;
 	gl->gl_name = name;
 	kref_init(&gl->gl_ref);
-
-	spin_lock_init(&gl->gl_spin);
-
 	gl->gl_state = LM_ST_UNLOCKED;
 	gl->gl_owner = NULL;
 	gl->gl_ip = 0;
-	INIT_LIST_HEAD(&gl->gl_holders);
-	INIT_LIST_HEAD(&gl->gl_waiters1);
-	INIT_LIST_HEAD(&gl->gl_waiters2);
-	INIT_LIST_HEAD(&gl->gl_waiters3);
-
 	gl->gl_ops = glops;
-
+	gl->gl_req_gh = NULL;
+	gl->gl_req_bh = NULL;
+	gl->gl_vn = 0;
+	gl->gl_stamp = jiffies;
+	gl->gl_object = NULL;
 	gl->gl_bucket = bucket;
-	INIT_LIST_HEAD(&gl->gl_reclaim);
-
 	gl->gl_sbd = sdp;
-
+	gl->gl_aspace = NULL;
 	lops_init_le(&gl->gl_le, &gfs2_glock_lops);
-	INIT_LIST_HEAD(&gl->gl_ail_list);
 
 	/* If this glock protects actual on-disk data or metadata blocks,
 	   create a VFS inode to manage the pages/buffers holding them. */
@@ -334,13 +325,11 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, uint64_t number,
 
 	return 0;
 
- fail_aspace:
+fail_aspace:
 	if (gl->gl_aspace)
 		gfs2_aspace_put(gl->gl_aspace);
-
- fail:
+fail:
 	kmem_cache_free(gfs2_glock_cachep, gl);
-
 	return error;
 }
 
@@ -495,9 +484,7 @@ static int rq_promote(struct gfs2_holder *gh)
 			gfs2_reclaim_glock(sdp);
 		}
 
-		glops->go_xmote_th(gl, gh->gh_state,
-				   gh->gh_flags);
-
+		glops->go_xmote_th(gl, gh->gh_state, gh->gh_flags);
 		spin_lock(&gl->gl_spin);
 	}
 	return 1;
@@ -935,8 +922,7 @@ void gfs2_glock_xmote_th(struct gfs2_glock *gl, unsigned int state, int flags)
 	gfs2_glock_hold(gl);
 	gl->gl_req_bh = xmote_bh;
 
-	lck_ret = gfs2_lm_lock(sdp, gl->gl_lock, gl->gl_state, state,
-			       lck_flags);
+	lck_ret = gfs2_lm_lock(sdp, gl->gl_lock, gl->gl_state, state, lck_flags);
 
 	if (gfs2_assert_withdraw(sdp, !(lck_ret & LM_OUT_ERROR)))
 		return;
@@ -1019,8 +1005,7 @@ void gfs2_glock_drop_th(struct gfs2_glock *gl)
 
 	if (gl->gl_state == LM_ST_EXCLUSIVE) {
 		if (glops->go_sync)
-			glops->go_sync(gl,
-				       DIO_METADATA | DIO_DATA | DIO_RELEASE);
+			glops->go_sync(gl, DIO_METADATA | DIO_DATA | DIO_RELEASE);
 	}
 
 	gfs2_glock_hold(gl);
diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c
index c112943ee8c1..dccc4f6f503f 100644
--- a/fs/gfs2/main.c
+++ b/fs/gfs2/main.c
@@ -15,6 +15,7 @@
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/gfs2_ondisk.h>
+#include <asm/atomic.h>
 
 #include "gfs2.h"
 #include "lm_interface.h"
@@ -35,6 +36,25 @@ static void gfs2_init_inode_once(void *foo, kmem_cache_t *cachep, unsigned long
 	}
 }
 
+static void gfs2_init_glock_once(void *foo, kmem_cache_t *cachep, unsigned long flags)
+{
+	struct gfs2_glock *gl = foo;
+	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
+	    SLAB_CTOR_CONSTRUCTOR) {
+		INIT_LIST_HEAD(&gl->gl_list);
+		spin_lock_init(&gl->gl_spin);
+		INIT_LIST_HEAD(&gl->gl_holders);
+		INIT_LIST_HEAD(&gl->gl_waiters1);
+		INIT_LIST_HEAD(&gl->gl_waiters2);
+		INIT_LIST_HEAD(&gl->gl_waiters3);
+		gl->gl_lvb = NULL;
+		atomic_set(&gl->gl_lvb_count, 0);
+		INIT_LIST_HEAD(&gl->gl_reclaim);
+		INIT_LIST_HEAD(&gl->gl_ail_list);
+		atomic_set(&gl->gl_ail_count, 0);
+	}
+}
+
 /**
  * init_gfs2_fs - Register GFS2 as a filesystem
  *
@@ -55,7 +75,8 @@ static int __init init_gfs2_fs(void)
 
 	gfs2_glock_cachep = kmem_cache_create("gfs2_glock",
 					      sizeof(struct gfs2_glock),
-					      0, 0, NULL, NULL);
+					      0, 0,
+					      gfs2_init_glock_once, NULL);
 	if (!gfs2_glock_cachep)
 		goto fail;
 