author     Steven Whitehouse <swhiteho@redhat.com>    2013-04-10 05:26:55 -0400
committer  Steven Whitehouse <swhiteho@redhat.com>    2013-04-10 05:26:55 -0400
commit     81ffbf654f0cfeeb44e69832b3d301958a4108d8 (patch)
tree       7c9c2d1c91dc2f696ea7cd0db25a28987195fcf6 /fs/gfs2
parent     16ca9412d8018188bddda29c3fee88471b94e3cb (diff)
GFS2: Add origin indicator to glock callbacks
This patch adds a bool indicating whether the demote request originated locally or remotely. This is then used by the iopen ->go_callback() to make 100% sure that it will only respond to remote callbacks.

Since ->evict_inode() uses GL_NOCACHE when it attempts to get an exclusive lock on the iopen lock, this may result in extra scheduling of the workqueue if the exclusive promotion request fails. This patch prevents that from happening.

Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
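In condensed form, the change threads a single origin flag from the demote path into the per-glock-type callback. The sketch below paraphrases the hunks that follow, with the function bodies trimmed to the lines that matter for the flag (elisions are marked):

    /* Condensed paraphrase of the diff below, not the complete functions. */
    static void handle_callback(struct gfs2_glock *gl, unsigned int state,
                                unsigned long delay, bool remote)
    {
            ...
            if (gl->gl_ops->go_callback)
                    gl->gl_ops->go_callback(gl, remote); /* origin passed through */
            ...
    }

    /* Remote demote requests arriving via gfs2_glock_cb() pass remote = true;
     * local demotes, such as the GL_NOCACHE case in gfs2_glock_dq(), pass false. */
    static void iopen_go_callback(struct gfs2_glock *gl, bool remote)
    {
            if (!remote) /* ignore demote requests of local origin */
                    return;
            ...
    }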
Diffstat (limited to 'fs/gfs2')
-rw-r--r--  fs/gfs2/glock.c  | 12
-rw-r--r--  fs/gfs2/glops.c  |  4
-rw-r--r--  fs/gfs2/incore.h |  2

3 files changed, 9 insertions(+), 9 deletions(-)
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 6e30fd17c55a..77d7927bcd75 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -912,7 +912,7 @@ int gfs2_glock_wait(struct gfs2_holder *gh)
  */
 
 static void handle_callback(struct gfs2_glock *gl, unsigned int state,
-                            unsigned long delay)
+                            unsigned long delay, bool remote)
 {
         int bit = delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE;
 
@@ -925,7 +925,7 @@ static void handle_callback(struct gfs2_glock *gl, unsigned int state,
                 gl->gl_demote_state = LM_ST_UNLOCKED;
         }
         if (gl->gl_ops->go_callback)
-                gl->gl_ops->go_callback(gl);
+                gl->gl_ops->go_callback(gl, remote);
         trace_gfs2_demote_rq(gl);
 }
 
@@ -1091,7 +1091,7 @@ void gfs2_glock_dq(struct gfs2_holder *gh)
 
         spin_lock(&gl->gl_spin);
         if (gh->gh_flags & GL_NOCACHE)
-                handle_callback(gl, LM_ST_UNLOCKED, 0);
+                handle_callback(gl, LM_ST_UNLOCKED, 0, false);
 
         list_del_init(&gh->gh_list);
         if (find_first_holder(gl) == NULL) {
@@ -1296,7 +1296,7 @@ void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state)
         }
 
         spin_lock(&gl->gl_spin);
-        handle_callback(gl, state, delay);
+        handle_callback(gl, state, delay, true);
         spin_unlock(&gl->gl_spin);
         if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
                 gfs2_glock_put(gl);
@@ -1409,7 +1409,7 @@ __acquires(&lru_lock)
                 spin_unlock(&lru_lock);
                 spin_lock(&gl->gl_spin);
                 if (demote_ok(gl))
-                        handle_callback(gl, LM_ST_UNLOCKED, 0);
+                        handle_callback(gl, LM_ST_UNLOCKED, 0, false);
                 WARN_ON(!test_and_clear_bit(GLF_LOCK, &gl->gl_flags));
                 smp_mb__after_clear_bit();
                 if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
@@ -1534,7 +1534,7 @@ static void clear_glock(struct gfs2_glock *gl)
 
         spin_lock(&gl->gl_spin);
         if (gl->gl_state != LM_ST_UNLOCKED)
-                handle_callback(gl, LM_ST_UNLOCKED, 0);
+                handle_callback(gl, LM_ST_UNLOCKED, 0, false);
         spin_unlock(&gl->gl_spin);
         gfs2_glock_hold(gl);
         if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index 444b6503ebc4..c66e99c97571 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -515,12 +515,12 @@ static int trans_go_demote_ok(const struct gfs2_glock *gl)
  *
  * gl_spin lock is held while calling this
  */
-static void iopen_go_callback(struct gfs2_glock *gl)
+static void iopen_go_callback(struct gfs2_glock *gl, bool remote)
 {
         struct gfs2_inode *ip = (struct gfs2_inode *)gl->gl_object;
         struct gfs2_sbd *sdp = gl->gl_sbd;
 
-        if (sdp->sd_vfs->s_flags & MS_RDONLY)
+        if (!remote || (sdp->sd_vfs->s_flags & MS_RDONLY))
                 return;
 
         if (gl->gl_demote_state == LM_ST_UNLOCKED &&
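The hunk above ends in the middle of iopen_go_callback(); the rest of the function is untouched by this patch. From memory of the code at this point in history (so treat the exact conditions as an approximation rather than part of this diff), it continues along these lines, queuing the delete work only when the glock is being demoted away from this node:

            if (gl->gl_demote_state == LM_ST_UNLOCKED &&
                gl->gl_state == LM_ST_SHARED && ip) {
                    gfs2_glock_hold(gl);
                    if (queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0)
                            gfs2_glock_put(gl);
            }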
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index 2532f7ec6b00..26aabd7caba7 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -210,7 +210,7 @@ struct gfs2_glock_operations {
         int (*go_lock) (struct gfs2_holder *gh);
         void (*go_unlock) (struct gfs2_holder *gh);
         int (*go_dump)(struct seq_file *seq, const struct gfs2_glock *gl);
-        void (*go_callback) (struct gfs2_glock *gl);
+        void (*go_callback)(struct gfs2_glock *gl, bool remote);
         const int go_type;
         const unsigned long go_flags;
 #define GLOF_ASPACE 1
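Any glock type that installs a ->go_callback must now accept the extra argument; as the diff shows, the iopen glops are the only implementation touched here. The wiring in the ops table looks roughly like this (a sketch of gfs2_iopen_glops; the exact set of initialized fields is not part of this diff):

    const struct gfs2_glock_operations gfs2_iopen_glops = {
            .go_type     = LM_TYPE_IOPEN,
            .go_callback = iopen_go_callback, /* now takes (gl, remote) */
    };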