aboutsummaryrefslogtreecommitdiffstats
path: root/fs/gfs2/glock.c
diff options
context:
space:
mode:
authorDavid Teigland <teigland@redhat.com>2012-11-14 13:46:53 -0500
committerSteven Whitehouse <swhiteho@redhat.com>2012-11-15 05:16:59 -0500
commitdba2d70c5dc520fdb569d1fd8dbd45c0e330253e (patch)
tree35cee899f98a6d863d48ecc2dc891b9dbb38b27e /fs/gfs2/glock.c
parentfb6791d100d1bba20b5cdbc4912e1f7086ec60f8 (diff)
GFS2: only use lvb on glocks that need it
Save the effort of allocating, reading and writing the lvb for most glocks that do not use it. Signed-off-by: David Teigland <teigland@redhat.com> Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
Diffstat (limited to 'fs/gfs2/glock.c')
-rw-r--r--fs/gfs2/glock.c27
1 file changed, 21 insertions, 6 deletions
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 9d29a5167d34..2284de4d05ce 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -105,10 +105,12 @@ static void gfs2_glock_dealloc(struct rcu_head *rcu)
105{ 105{
106 struct gfs2_glock *gl = container_of(rcu, struct gfs2_glock, gl_rcu); 106 struct gfs2_glock *gl = container_of(rcu, struct gfs2_glock, gl_rcu);
107 107
108 if (gl->gl_ops->go_flags & GLOF_ASPACE) 108 if (gl->gl_ops->go_flags & GLOF_ASPACE) {
109 kmem_cache_free(gfs2_glock_aspace_cachep, gl); 109 kmem_cache_free(gfs2_glock_aspace_cachep, gl);
110 else 110 } else {
111 kfree(gl->gl_lvb);
111 kmem_cache_free(gfs2_glock_cachep, gl); 112 kmem_cache_free(gfs2_glock_cachep, gl);
113 }
112} 114}
113 115
114void gfs2_glock_free(struct gfs2_glock *gl) 116void gfs2_glock_free(struct gfs2_glock *gl)
@@ -545,7 +547,10 @@ __acquires(&gl->gl_spin)
545 if (sdp->sd_lockstruct.ls_ops->lm_lock) { 547 if (sdp->sd_lockstruct.ls_ops->lm_lock) {
546 /* lock_dlm */ 548 /* lock_dlm */
547 ret = sdp->sd_lockstruct.ls_ops->lm_lock(gl, target, lck_flags); 549 ret = sdp->sd_lockstruct.ls_ops->lm_lock(gl, target, lck_flags);
548 GLOCK_BUG_ON(gl, ret); 550 if (ret) {
551 printk(KERN_ERR "GFS2: lm_lock ret %d\n", ret);
552 GLOCK_BUG_ON(gl, 1);
553 }
549 } else { /* lock_nolock */ 554 } else { /* lock_nolock */
550 finish_xmote(gl, target); 555 finish_xmote(gl, target);
551 if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0) 556 if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
@@ -734,6 +739,18 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
734 if (!gl) 739 if (!gl)
735 return -ENOMEM; 740 return -ENOMEM;
736 741
742 memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb));
743 gl->gl_lvb = NULL;
744
745 if (glops->go_flags & GLOF_LVB) {
746 gl->gl_lvb = kzalloc(GFS2_MIN_LVB_SIZE, GFP_KERNEL);
747 if (!gl->gl_lvb) {
748 kmem_cache_free(cachep, gl);
749 return -ENOMEM;
750 }
751 gl->gl_lksb.sb_lvbptr = gl->gl_lvb;
752 }
753
737 atomic_inc(&sdp->sd_glock_disposal); 754 atomic_inc(&sdp->sd_glock_disposal);
738 gl->gl_sbd = sdp; 755 gl->gl_sbd = sdp;
739 gl->gl_flags = 0; 756 gl->gl_flags = 0;
@@ -751,9 +768,6 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
751 preempt_enable(); 768 preempt_enable();
752 gl->gl_stats.stats[GFS2_LKS_DCOUNT] = 0; 769 gl->gl_stats.stats[GFS2_LKS_DCOUNT] = 0;
753 gl->gl_stats.stats[GFS2_LKS_QCOUNT] = 0; 770 gl->gl_stats.stats[GFS2_LKS_QCOUNT] = 0;
754 memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb));
755 memset(gl->gl_lvb, 0, 32 * sizeof(char));
756 gl->gl_lksb.sb_lvbptr = gl->gl_lvb;
757 gl->gl_tchange = jiffies; 771 gl->gl_tchange = jiffies;
758 gl->gl_object = NULL; 772 gl->gl_object = NULL;
759 gl->gl_hold_time = GL_GLOCK_DFT_HOLD; 773 gl->gl_hold_time = GL_GLOCK_DFT_HOLD;
@@ -775,6 +789,7 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
775 tmp = search_bucket(hash, sdp, &name); 789 tmp = search_bucket(hash, sdp, &name);
776 if (tmp) { 790 if (tmp) {
777 spin_unlock_bucket(hash); 791 spin_unlock_bucket(hash);
792 kfree(gl->gl_lvb);
778 kmem_cache_free(cachep, gl); 793 kmem_cache_free(cachep, gl);
779 atomic_dec(&sdp->sd_glock_disposal); 794 atomic_dec(&sdp->sd_glock_disposal);
780 gl = tmp; 795 gl = tmp;