author    Steven Whitehouse <swhiteho@redhat.com>  2010-11-30 10:49:31 -0500
committer Steven Whitehouse <swhiteho@redhat.com>  2010-11-30 10:49:31 -0500
commit    47a25380e37f44db7202093ca92e4af569c34f55
tree      db3e6dba3859c5562b9a86f6d4059519fa7a1c52  /fs/gfs2/glock.c
parent    e06dfc492870e1d380f02722cde084b724dc197b
GFS2: Merge glock state fields into a bitfield
We can only merge the fields into a bitfield if the locking rules for
them are the same. In this case gl_spin covers all of the fields (write
side) but a couple of them are used with GLF_LOCK as the read side lock,
which should be ok since we know that the field in question won't be
changing at the time.

The gl_req setting has to be done earlier (in glock.c) in order to place
it under gl_spin. The gl_reply setting also has to be brought under
gl_spin in order to comply with the new rules.

This saves 4*sizeof(unsigned int) per glock.

Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
Cc: Bob Peterson <rpeterso@redhat.com>
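For context, a minimal sketch of what the merged declaration could look
like. The struct change itself lands in fs/gfs2/incore.h, which is
outside this diffstat, so the exact field list and widths below are
assumptions: the message names only gl_req and gl_reply explicitly, and
the widths are chosen to fit the four LM_ST_* lock states plus a dlm
status byte, consistent with the stated 4*sizeof(unsigned int) saving.

/*
 * Hypothetical sketch of the merged fields in struct gfs2_glock:
 * five formerly separate unsigned ints share one word. All writes
 * must hold gl_spin; widths shown are illustrative assumptions.
 */
struct gfs2_glock {
        spinlock_t gl_spin;             /* write side lock for the fields below */
        unsigned int gl_state:2,        /* current state (LM_ST_...) */
                     gl_target:2,       /* state being moved to */
                     gl_demote_state:2, /* state requested by remote node */
                     gl_req:2,          /* state in last dlm request */
                     gl_reply:8;        /* last reply from the dlm */
        /* remaining members elided */
};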
Diffstat (limited to 'fs/gfs2/glock.c')
-rw-r--r--  fs/gfs2/glock.c  9
1 file changed, 7 insertions(+), 2 deletions(-)
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 2dd1d7238111..08a8beb152e6 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -567,6 +567,7 @@ __acquires(&gl->gl_spin)
                 set_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
                 do_error(gl, 0); /* Fail queued try locks */
         }
+        gl->gl_req = target;
         spin_unlock(&gl->gl_spin);
         if (glops->go_xmote_th)
                 glops->go_xmote_th(gl);
@@ -1353,24 +1354,28 @@ static int gfs2_should_freeze(const struct gfs2_glock *gl)
  * @gl: Pointer to the glock
  * @ret: The return value from the dlm
  *
+ * The gl_reply field is under the gl_spin lock so that it is ok
+ * to use a bitfield shared with other glock state fields.
  */
 
 void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
 {
         struct lm_lockstruct *ls = &gl->gl_sbd->sd_lockstruct;
 
+        spin_lock(&gl->gl_spin);
         gl->gl_reply = ret;
 
         if (unlikely(test_bit(DFL_BLOCK_LOCKS, &ls->ls_flags))) {
-                spin_lock(&gl->gl_spin);
                 if (gfs2_should_freeze(gl)) {
                         set_bit(GLF_FROZEN, &gl->gl_flags);
                         spin_unlock(&gl->gl_spin);
                         return;
                 }
-                spin_unlock(&gl->gl_spin);
         }
+
+        spin_unlock(&gl->gl_spin);
         set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
+        smp_wmb();
         gfs2_glock_hold(gl);
         if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
                 gfs2_glock_put(gl);
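To make the new rule concrete, here is a standalone userspace sketch
(an illustration only, not part of this patch; a pthread mutex stands
in for gl_spin, and struct glock_like, reply_writer and use_lock are
hypothetical names). A store to one bitfield member compiles to a
read-modify-write of the containing word, so an unlocked concurrent
store to a neighbouring member can be silently undone, which is why
every write side must take the same lock once the fields are merged.

#include <pthread.h>
#include <stdio.h>

struct glock_like {                     /* stand-in for struct gfs2_glock */
        unsigned int state:2;           /* stand-in for gl_state */
        unsigned int reply:8;           /* stand-in for gl_reply */
};

static struct glock_like gl;
static pthread_mutex_t gl_spin = PTHREAD_MUTEX_INITIALIZER;
static int use_lock;                    /* 0 = racy, 1 = locked writes */

static void *reply_writer(void *unused)
{
        for (int i = 0; i < 10000000; i++) {
                if (use_lock)
                        pthread_mutex_lock(&gl_spin);
                gl.reply = i & 0xff;    /* RMW of the word shared with state */
                if (use_lock)
                        pthread_mutex_unlock(&gl_spin);
        }
        return NULL;
}

int main(int argc, char **argv)
{
        pthread_t t;

        use_lock = argc > 1;            /* pass any argument to lock writes */
        pthread_create(&t, NULL, reply_writer, NULL);

        if (use_lock)
                pthread_mutex_lock(&gl_spin);
        gl.state = 3;                   /* concurrent store to the neighbour */
        if (use_lock)
                pthread_mutex_unlock(&gl_spin);

        pthread_join(t, NULL);
        printf("state=%u (expected 3)\n", gl.state); /* may be 0 if unlocked */
        return 0;
}

Build with cc -pthread: run without arguments, a reply RMW that straddles
the state store can revert state to 0 for good; with any argument the
common lock makes that impossible. The smp_wmb() the patch adds after
set_bit(GLF_REPLY_PENDING, ...) is a separate matter, presumably ensuring
the pending-reply state is visible before the queued work runs elsewhere.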