Diffstat (limited to 'fs/gfs2')
-rw-r--r--   fs/gfs2/glock.c   13
-rw-r--r--   fs/gfs2/glops.c   80
2 files changed, 22 insertions, 71 deletions
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 159a5479c4e4..e668808b127f 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -947,8 +947,8 @@ static void gfs2_glock_drop_th(struct gfs2_glock *gl)
         const struct gfs2_glock_operations *glops = gl->gl_ops;
         unsigned int ret;
 
-        if (glops->go_drop_th)
-                glops->go_drop_th(gl);
+        if (glops->go_xmote_th)
+                glops->go_xmote_th(gl);
 
         gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
         gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
@@ -1252,12 +1252,11 @@ void gfs2_glock_dq(struct gfs2_holder *gh)
         list_del_init(&gh->gh_list);
 
         if (list_empty(&gl->gl_holders)) {
-                spin_unlock(&gl->gl_spin);
-
-                if (glops->go_unlock)
+                if (glops->go_unlock) {
+                        spin_unlock(&gl->gl_spin);
                         glops->go_unlock(gh);
-
-                spin_lock(&gl->gl_spin);
+                        spin_lock(&gl->gl_spin);
+                }
                 gl->gl_stamp = jiffies;
         }
 
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index ba124230393b..c663b7a0f410 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -138,44 +138,34 @@ static void meta_go_inval(struct gfs2_glock *gl, int flags)
 static void inode_go_sync(struct gfs2_glock *gl)
 {
         struct gfs2_inode *ip = gl->gl_object;
+        struct address_space *metamapping = gl->gl_aspace->i_mapping;
+        int error;
+
+        if (gl->gl_state != LM_ST_UNLOCKED)
+                gfs2_pte_inval(gl);
+        if (gl->gl_state != LM_ST_EXCLUSIVE)
+                return;
 
         if (ip && !S_ISREG(ip->i_inode.i_mode))
                 ip = NULL;
 
         if (test_bit(GLF_DIRTY, &gl->gl_flags)) {
-                if (ip && !gfs2_is_jdata(ip))
-                        filemap_fdatawrite(ip->i_inode.i_mapping);
                 gfs2_log_flush(gl->gl_sbd, gl);
-                if (ip && gfs2_is_jdata(ip))
-                        filemap_fdatawrite(ip->i_inode.i_mapping);
-                gfs2_meta_sync(gl);
+                filemap_fdatawrite(metamapping);
                 if (ip) {
                         struct address_space *mapping = ip->i_inode.i_mapping;
-                        int error = filemap_fdatawait(mapping);
+                        filemap_fdatawrite(mapping);
+                        error = filemap_fdatawait(mapping);
                         mapping_set_error(mapping, error);
                 }
+                error = filemap_fdatawait(metamapping);
+                mapping_set_error(metamapping, error);
                 clear_bit(GLF_DIRTY, &gl->gl_flags);
                 gfs2_ail_empty_gl(gl);
         }
 }
 
 /**
- * inode_go_xmote_th - promote/demote a glock
- * @gl: the glock
- * @state: the requested state
- * @flags:
- *
- */
-
-static void inode_go_xmote_th(struct gfs2_glock *gl)
-{
-        if (gl->gl_state != LM_ST_UNLOCKED)
-                gfs2_pte_inval(gl);
-        if (gl->gl_state == LM_ST_EXCLUSIVE)
-                inode_go_sync(gl);
-}
-
-/**
  * inode_go_xmote_bh - After promoting/demoting a glock
  * @gl: the glock
  *
@@ -196,22 +186,6 @@ static void inode_go_xmote_bh(struct gfs2_glock *gl)
 }
 
 /**
- * inode_go_drop_th - unlock a glock
- * @gl: the glock
- *
- * Invoked from rq_demote().
- * Another node needs the lock in EXCLUSIVE mode, or lock (unused for too long)
- * is being purged from our node's glock cache; we're dropping lock.
- */
-
-static void inode_go_drop_th(struct gfs2_glock *gl)
-{
-        gfs2_pte_inval(gl);
-        if (gl->gl_state == LM_ST_EXCLUSIVE)
-                inode_go_sync(gl);
-}
-
-/**
  * inode_go_inval - prepare a inode glock to be released
  * @gl: the glock
  * @flags:
@@ -326,14 +300,14 @@ static void rgrp_go_unlock(struct gfs2_holder *gh)
 }
 
 /**
- * trans_go_xmote_th - promote/demote the transaction glock
+ * trans_go_sync - promote/demote the transaction glock
  * @gl: the glock
  * @state: the requested state
  * @flags:
  *
  */
 
-static void trans_go_xmote_th(struct gfs2_glock *gl)
+static void trans_go_sync(struct gfs2_glock *gl)
 {
         struct gfs2_sbd *sdp = gl->gl_sbd;
 
@@ -377,24 +351,6 @@ static void trans_go_xmote_bh(struct gfs2_glock *gl)
 }
 
 /**
- * trans_go_drop_th - unlock the transaction glock
- * @gl: the glock
- *
- * We want to sync the device even with localcaching. Remember
- * that localcaching journal replay only marks buffers dirty.
- */
-
-static void trans_go_drop_th(struct gfs2_glock *gl)
-{
-        struct gfs2_sbd *sdp = gl->gl_sbd;
-
-        if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
-                gfs2_meta_syncfs(sdp);
-                gfs2_log_shutdown(sdp);
-        }
-}
-
-/**
  * quota_go_demote_ok - Check to see if it's ok to unlock a quota glock
  * @gl: the glock
  *
@@ -408,14 +364,12 @@ static int quota_go_demote_ok(struct gfs2_glock *gl)
 
 const struct gfs2_glock_operations gfs2_meta_glops = {
         .go_xmote_th = meta_go_sync,
-        .go_drop_th = meta_go_sync,
         .go_type = LM_TYPE_META,
 };
 
 const struct gfs2_glock_operations gfs2_inode_glops = {
-        .go_xmote_th = inode_go_xmote_th,
+        .go_xmote_th = inode_go_sync,
         .go_xmote_bh = inode_go_xmote_bh,
-        .go_drop_th = inode_go_drop_th,
         .go_inval = inode_go_inval,
         .go_demote_ok = inode_go_demote_ok,
         .go_lock = inode_go_lock,
@@ -425,7 +379,6 @@ const struct gfs2_glock_operations gfs2_inode_glops = {
 
 const struct gfs2_glock_operations gfs2_rgrp_glops = {
         .go_xmote_th = meta_go_sync,
-        .go_drop_th = meta_go_sync,
         .go_inval = meta_go_inval,
         .go_demote_ok = rgrp_go_demote_ok,
         .go_lock = rgrp_go_lock,
@@ -435,9 +388,8 @@ const struct gfs2_glock_operations gfs2_rgrp_glops = {
 };
 
 const struct gfs2_glock_operations gfs2_trans_glops = {
-        .go_xmote_th = trans_go_xmote_th,
+        .go_xmote_th = trans_go_sync,
         .go_xmote_bh = trans_go_xmote_bh,
-        .go_drop_th = trans_go_drop_th,
         .go_type = LM_TYPE_NONDISK,
 };
 