author	Steven Whitehouse <swhiteho@redhat.com>	2007-11-02 04:39:34 -0400
committer	Steven Whitehouse <swhiteho@redhat.com>	2008-01-25 03:07:42 -0500
commit	3042a2ccd68d2b609d283219e51cba363aa35c1d (patch)
tree	032653f2111bf20c1f4610d3801c42020c3a1abd /fs/gfs2/glops.c
parent	52d4c74b08bf859f698ddb4e8a43c0dc8d4a0685 (diff)
[GFS2] Reorder writeback for glock sync
Previously we were doing (write data, wait for data, write metadata, wait for metadata). After this patch we do (write metadata, write data, wait for data, wait for metadata), which should be more efficient.

Also I noticed that the drop_th and xmote_th functions were almost identical. In fact the only difference was a single test, and that test is such that in the drop_th case it would always evaluate to the correct result. As such we can use the xmote_th functions in all the places where we were using the drop_th functions, and remove the drop_th functions.

Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
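For orientation, this is roughly how inode_go_sync() reads once the patch below is applied. It is reconstructed from the first hunk of the diff, not copied verbatim from the tree, and the inline comments marking the new write/wait ordering are editorial additions:

static void inode_go_sync(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip = gl->gl_object;
	struct address_space *metamapping = gl->gl_aspace->i_mapping;
	int error;

	if (gl->gl_state != LM_ST_UNLOCKED)
		gfs2_pte_inval(gl);
	if (gl->gl_state != LM_ST_EXCLUSIVE)
		return;

	if (ip && !S_ISREG(ip->i_inode.i_mode))
		ip = NULL;

	if (test_bit(GLF_DIRTY, &gl->gl_flags)) {
		gfs2_log_flush(gl->gl_sbd, gl);
		filemap_fdatawrite(metamapping);		/* 1. write metadata */
		if (ip) {
			struct address_space *mapping = ip->i_inode.i_mapping;
			filemap_fdatawrite(mapping);		/* 2. write data */
			error = filemap_fdatawait(mapping);	/* 3. wait for data */
			mapping_set_error(mapping, error);
		}
		error = filemap_fdatawait(metamapping);		/* 4. wait for metadata */
		mapping_set_error(metamapping, error);
		clear_bit(GLF_DIRTY, &gl->gl_flags);
		gfs2_ail_empty_gl(gl);
	}
}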
Diffstat (limited to 'fs/gfs2/glops.c')
-rw-r--r--	fs/gfs2/glops.c	80
1 file changed, 16 insertions, 64 deletions
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index ba124230393b..c663b7a0f410 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -138,44 +138,34 @@ static void meta_go_inval(struct gfs2_glock *gl, int flags)
 static void inode_go_sync(struct gfs2_glock *gl)
 {
 	struct gfs2_inode *ip = gl->gl_object;
+	struct address_space *metamapping = gl->gl_aspace->i_mapping;
+	int error;
+
+	if (gl->gl_state != LM_ST_UNLOCKED)
+		gfs2_pte_inval(gl);
+	if (gl->gl_state != LM_ST_EXCLUSIVE)
+		return;
 
 	if (ip && !S_ISREG(ip->i_inode.i_mode))
 		ip = NULL;
 
 	if (test_bit(GLF_DIRTY, &gl->gl_flags)) {
-		if (ip && !gfs2_is_jdata(ip))
-			filemap_fdatawrite(ip->i_inode.i_mapping);
 		gfs2_log_flush(gl->gl_sbd, gl);
-		if (ip && gfs2_is_jdata(ip))
-			filemap_fdatawrite(ip->i_inode.i_mapping);
-		gfs2_meta_sync(gl);
+		filemap_fdatawrite(metamapping);
 		if (ip) {
 			struct address_space *mapping = ip->i_inode.i_mapping;
-			int error = filemap_fdatawait(mapping);
+			filemap_fdatawrite(mapping);
+			error = filemap_fdatawait(mapping);
 			mapping_set_error(mapping, error);
 		}
+		error = filemap_fdatawait(metamapping);
+		mapping_set_error(metamapping, error);
 		clear_bit(GLF_DIRTY, &gl->gl_flags);
 		gfs2_ail_empty_gl(gl);
 	}
 }
 
 /**
- * inode_go_xmote_th - promote/demote a glock
- * @gl: the glock
- * @state: the requested state
- * @flags:
- *
- */
-
-static void inode_go_xmote_th(struct gfs2_glock *gl)
-{
-	if (gl->gl_state != LM_ST_UNLOCKED)
-		gfs2_pte_inval(gl);
-	if (gl->gl_state == LM_ST_EXCLUSIVE)
-		inode_go_sync(gl);
-}
-
-/**
  * inode_go_xmote_bh - After promoting/demoting a glock
  * @gl: the glock
  *
@@ -196,22 +186,6 @@ static void inode_go_xmote_bh(struct gfs2_glock *gl)
 }
 
 /**
- * inode_go_drop_th - unlock a glock
- * @gl: the glock
- *
- * Invoked from rq_demote().
- * Another node needs the lock in EXCLUSIVE mode, or lock (unused for too long)
- * is being purged from our node's glock cache; we're dropping lock.
- */
-
-static void inode_go_drop_th(struct gfs2_glock *gl)
-{
-	gfs2_pte_inval(gl);
-	if (gl->gl_state == LM_ST_EXCLUSIVE)
-		inode_go_sync(gl);
-}
-
-/**
  * inode_go_inval - prepare a inode glock to be released
  * @gl: the glock
  * @flags:
@@ -326,14 +300,14 @@ static void rgrp_go_unlock(struct gfs2_holder *gh)
 }
 
 /**
- * trans_go_xmote_th - promote/demote the transaction glock
+ * trans_go_sync - promote/demote the transaction glock
  * @gl: the glock
  * @state: the requested state
  * @flags:
  *
  */
 
-static void trans_go_xmote_th(struct gfs2_glock *gl)
+static void trans_go_sync(struct gfs2_glock *gl)
 {
 	struct gfs2_sbd *sdp = gl->gl_sbd;
 
@@ -377,24 +351,6 @@ static void trans_go_xmote_bh(struct gfs2_glock *gl)
 }
 
 /**
- * trans_go_drop_th - unlock the transaction glock
- * @gl: the glock
- *
- * We want to sync the device even with localcaching. Remember
- * that localcaching journal replay only marks buffers dirty.
- */
-
-static void trans_go_drop_th(struct gfs2_glock *gl)
-{
-	struct gfs2_sbd *sdp = gl->gl_sbd;
-
-	if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
-		gfs2_meta_syncfs(sdp);
-		gfs2_log_shutdown(sdp);
-	}
-}
-
-/**
  * quota_go_demote_ok - Check to see if it's ok to unlock a quota glock
  * @gl: the glock
  *
@@ -408,14 +364,12 @@ static int quota_go_demote_ok(struct gfs2_glock *gl)
 
 const struct gfs2_glock_operations gfs2_meta_glops = {
 	.go_xmote_th = meta_go_sync,
-	.go_drop_th = meta_go_sync,
 	.go_type = LM_TYPE_META,
 };
 
 const struct gfs2_glock_operations gfs2_inode_glops = {
-	.go_xmote_th = inode_go_xmote_th,
+	.go_xmote_th = inode_go_sync,
 	.go_xmote_bh = inode_go_xmote_bh,
-	.go_drop_th = inode_go_drop_th,
 	.go_inval = inode_go_inval,
 	.go_demote_ok = inode_go_demote_ok,
 	.go_lock = inode_go_lock,
@@ -425,7 +379,6 @@ const struct gfs2_glock_operations gfs2_inode_glops = {
 
 const struct gfs2_glock_operations gfs2_rgrp_glops = {
 	.go_xmote_th = meta_go_sync,
-	.go_drop_th = meta_go_sync,
 	.go_inval = meta_go_inval,
 	.go_demote_ok = rgrp_go_demote_ok,
 	.go_lock = rgrp_go_lock,
@@ -435,9 +388,8 @@ const struct gfs2_glock_operations gfs2_rgrp_glops = {
 };
 
 const struct gfs2_glock_operations gfs2_trans_glops = {
-	.go_xmote_th = trans_go_xmote_th,
+	.go_xmote_th = trans_go_sync,
 	.go_xmote_bh = trans_go_xmote_bh,
-	.go_drop_th = trans_go_drop_th,
 	.go_type = LM_TYPE_NONDISK,
 };
 