-rw-r--r--	fs/gfs2/glock.c		31
-rw-r--r--	fs/gfs2/glock.h		12
-rw-r--r--	fs/gfs2/lock_dlm.c	14
3 files changed, 17 insertions, 40 deletions
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index a9b53a48abeb..2dd1d7238111 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -541,21 +541,6 @@ out_locked:
 	spin_unlock(&gl->gl_spin);
 }
 
-static unsigned int gfs2_lm_lock(struct gfs2_sbd *sdp, void *lock,
-				 unsigned int req_state,
-				 unsigned int flags)
-{
-	int ret = LM_OUT_ERROR;
-
-	if (!sdp->sd_lockstruct.ls_ops->lm_lock)
-		return req_state == LM_ST_UNLOCKED ? 0 : req_state;
-
-	if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
-		ret = sdp->sd_lockstruct.ls_ops->lm_lock(lock,
-							 req_state, flags);
-	return ret;
-}
-
 /**
  * do_xmote - Calls the DLM to change the state of a lock
  * @gl: The lock state
@@ -575,8 +560,8 @@ __acquires(&gl->gl_spin)
 
 	lck_flags &= (LM_FLAG_TRY | LM_FLAG_TRY_1CB | LM_FLAG_NOEXP |
 		      LM_FLAG_PRIORITY);
-	BUG_ON(gl->gl_state == target);
-	BUG_ON(gl->gl_state == gl->gl_target);
+	GLOCK_BUG_ON(gl, gl->gl_state == target);
+	GLOCK_BUG_ON(gl, gl->gl_state == gl->gl_target);
 	if ((target == LM_ST_UNLOCKED || target == LM_ST_DEFERRED) &&
 	    glops->go_inval) {
 		set_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
@@ -594,15 +579,17 @@ __acquires(&gl->gl_spin)
 	    gl->gl_state == LM_ST_DEFERRED) &&
 	    !(lck_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
 		lck_flags |= LM_FLAG_TRY_1CB;
-	ret = gfs2_lm_lock(sdp, gl, target, lck_flags);
 
-	if (!(ret & LM_OUT_ASYNC)) {
-		finish_xmote(gl, ret);
+	if (sdp->sd_lockstruct.ls_ops->lm_lock) {
+		/* lock_dlm */
+		ret = sdp->sd_lockstruct.ls_ops->lm_lock(gl, target, lck_flags);
+		GLOCK_BUG_ON(gl, ret);
+	} else { /* lock_nolock */
+		finish_xmote(gl, target);
 		if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
 			gfs2_glock_put(gl);
-	} else {
-		GLOCK_BUG_ON(gl, ret != LM_OUT_ASYNC);
 	}
+
 	spin_lock(&gl->gl_spin);
 }
 
diff --git a/fs/gfs2/glock.h b/fs/gfs2/glock.h
index a12d11767752..ba6865c6e7e0 100644
--- a/fs/gfs2/glock.h
+++ b/fs/gfs2/glock.h
@@ -91,7 +91,7 @@ enum {
 #define GL_NOCACHE		0x00000400
 
 /*
- * lm_lock() and lm_async_cb return flags
+ * lm_async_cb return flags
  *
  * LM_OUT_ST_MASK
  * Masks the lower two bits of lock state in the returned value.
@@ -99,15 +99,11 @@ enum {
  * LM_OUT_CANCELED
  * The lock request was canceled.
  *
- * LM_OUT_ASYNC
- * The result of the request will be returned in an LM_CB_ASYNC callback.
- *
  */
 
 #define LM_OUT_ST_MASK		0x00000003
 #define LM_OUT_CANCELED		0x00000008
-#define LM_OUT_ASYNC		0x00000080
-#define LM_OUT_ERROR		0x00000100
+#define LM_OUT_ERROR		0x00000004
 
 /*
  * lm_recovery_done() messages
@@ -124,8 +120,8 @@ struct lm_lockops {
 	void (*lm_unmount) (struct gfs2_sbd *sdp);
 	void (*lm_withdraw) (struct gfs2_sbd *sdp);
 	void (*lm_put_lock) (struct kmem_cache *cachep, struct gfs2_glock *gl);
-	unsigned int (*lm_lock) (struct gfs2_glock *gl,
-				 unsigned int req_state, unsigned int flags);
+	int (*lm_lock) (struct gfs2_glock *gl, unsigned int req_state,
+			unsigned int flags);
 	void (*lm_cancel) (struct gfs2_glock *gl);
 	const match_table_t *lm_tokens;
 };
diff --git a/fs/gfs2/lock_dlm.c b/fs/gfs2/lock_dlm.c
index 1c09425b45fd..f40ce34c803e 100644
--- a/fs/gfs2/lock_dlm.c
+++ b/fs/gfs2/lock_dlm.c
@@ -146,11 +146,10 @@ static u32 make_flags(const u32 lkid, const unsigned int gfs_flags,
 	return lkf;
 }
 
-static unsigned int gdlm_lock(struct gfs2_glock *gl,
-			      unsigned int req_state, unsigned int flags)
+static int gdlm_lock(struct gfs2_glock *gl, unsigned int req_state,
+		     unsigned int flags)
 {
 	struct lm_lockstruct *ls = &gl->gl_sbd->sd_lockstruct;
-	int error;
 	int req;
 	u32 lkf;
 
@@ -162,13 +161,8 @@ static unsigned int gdlm_lock(struct gfs2_glock *gl,
 	 * Submit the actual lock request.
 	 */
 
-	error = dlm_lock(ls->ls_dlm, req, &gl->gl_lksb, lkf, gl->gl_strname,
-			 GDLM_STRNAME_BYTES - 1, 0, gdlm_ast, gl, gdlm_bast);
-	if (error == -EAGAIN)
-		return 0;
-	if (error)
-		return LM_OUT_ERROR;
-	return LM_OUT_ASYNC;
+	return dlm_lock(ls->ls_dlm, req, &gl->gl_lksb, lkf, gl->gl_strname,
+			GDLM_STRNAME_BYTES - 1, 0, gdlm_ast, gl, gdlm_bast);
 }
 
 static void gdlm_put_lock(struct kmem_cache *cachep, struct gfs2_glock *gl)
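
For illustration only (not part of the patch): a minimal userspace sketch of the ->lm_lock contract as it looks after this change. lm_lock now returns a plain submission status (0 on success, a negative errno otherwise) instead of encoding LM_OUT_ASYNC/LM_OUT_ERROR in its return value, with the actual lock result delivered later through the ast callback; a missing lm_lock op means lock_nolock and the state change completes synchronously, as in the patched do_xmote(). All demo_* names below are invented for the sketch and are not kernel API.

/* Build with: cc -Wall demo_lm_lock.c -o demo_lm_lock */
#include <stdio.h>

struct demo_glock {
	unsigned int state;	/* current lock state */
	unsigned int target;	/* requested lock state */
};

/* Post-patch shape of the op: int return, result arrives via callback. */
struct demo_lockops {
	int (*lm_lock)(struct demo_glock *gl, unsigned int req_state,
		       unsigned int flags);
};

static void demo_ast(struct demo_glock *gl)
{
	/* In lock_dlm this role is played by gdlm_ast(). */
	gl->state = gl->target;
	printf("async completion: state now %u\n", gl->state);
}

static int demo_dlm_lock(struct demo_glock *gl, unsigned int req_state,
			 unsigned int flags)
{
	(void)flags;
	gl->target = req_state;
	/* A real dlm_lock() queues the request; the sketch "completes"
	 * it immediately to stay self-contained. */
	demo_ast(gl);
	return 0;	/* 0 = submitted OK, negative errno on failure */
}

static const struct demo_lockops demo_dlm_ops = {
	.lm_lock = demo_dlm_lock,
};

/* Caller modelled on the patched do_xmote(): a present lm_lock op is
 * called and its return treated purely as submit status; a missing op
 * (the lock_nolock case) finishes the transition synchronously. */
static void demo_do_xmote(const struct demo_lockops *ops,
			  struct demo_glock *gl, unsigned int target)
{
	if (ops && ops->lm_lock) {
		int ret = ops->lm_lock(gl, target, 0);
		if (ret)
			fprintf(stderr, "lock submission failed: %d\n", ret);
	} else {
		gl->state = target;	/* no cluster round trip */
	}
}

int main(void)
{
	struct demo_glock gl = { .state = 0, .target = 0 };

	demo_do_xmote(&demo_dlm_ops, &gl, 3);	/* "dlm" path */
	demo_do_xmote(NULL, &gl, 0);		/* "nolock" path */
	printf("final state %u\n", gl.state);
	return 0;
}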