 Documentation/filesystems/gfs2-glocks.txt |  6 +--
 fs/gfs2/glock.c                           | 84 ++++++++++++++++++-------------------
 fs/gfs2/glock.h                           |  4 +-
 fs/gfs2/glops.c                           | 10 ++---
 fs/gfs2/incore.h                          |  3 +--
 fs/gfs2/main.c                            |  2 +-
 fs/gfs2/rgrp.c                            |  4 +-
 7 files changed, 56 insertions(+), 57 deletions(-)
diff --git a/Documentation/filesystems/gfs2-glocks.txt b/Documentation/filesystems/gfs2-glocks.txt
index fcc79957be63..1fb12f9dfe48 100644
--- a/Documentation/filesystems/gfs2-glocks.txt
+++ b/Documentation/filesystems/gfs2-glocks.txt
@@ -5,7 +5,7 @@ This documents the basic principles of the glock state machine
 internals. Each glock (struct gfs2_glock in fs/gfs2/incore.h)
 has two main (internal) locks:
 
- 1. A spinlock (gl_spin) which protects the internal state such
+ 1. A spinlock (gl_lockref.lock) which protects the internal state such
     as gl_state, gl_target and the list of holders (gl_holders)
  2. A non-blocking bit lock, GLF_LOCK, which is used to prevent other
     threads from making calls to the DLM, etc. at the same time. If a
@@ -82,8 +82,8 @@ rather than via the glock.
 
 Locking rules for glock operations:
 
-Operation      |  GLF_LOCK bit lock held |  gl_spin spinlock held
------------------------------------------------------------------
+Operation      |  GLF_LOCK bit lock held |  gl_lockref.lock spinlock held
+-------------------------------------------------------------------------
 go_xmote_th    |       Yes               |       No
 go_xmote_bh    |       Yes               |       No
 go_inval       |       Yes               |       No
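
The table entries above encode a pattern that recurs throughout the glock.c
changes below: an operation marked "GLF_LOCK: Yes / spinlock: No" is invoked
with the bit lock held but gl_lockref.lock dropped, so it is free to sleep.
A minimal sketch of that calling convention, mirroring what do_promote() does
around go_lock() in the next file (the wrapper name here is illustrative, not
from the sources):

static void call_blocking_glop(struct gfs2_glock *gl, struct gfs2_holder *gh)
{
	int ret;

	/* entered with gl->gl_lockref.lock held and GLF_LOCK already set */
	spin_unlock(&gl->gl_lockref.lock);
	ret = gl->gl_ops->go_lock(gh);	/* may block on I/O or the DLM */
	spin_lock(&gl->gl_lockref.lock);
	/* any state derived from the glock before unlocking must now be
	 * revalidated; the real callers also act on ret at this point */
}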
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 9bd1244caf38..32e74710b1aa 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -246,8 +246,8 @@ static inline void do_error(struct gfs2_glock *gl, const int ret)
  */
 
 static int do_promote(struct gfs2_glock *gl)
-__releases(&gl->gl_spin)
-__acquires(&gl->gl_spin)
+__releases(&gl->gl_lockref.lock)
+__acquires(&gl->gl_lockref.lock)
 {
 	const struct gfs2_glock_operations *glops = gl->gl_ops;
 	struct gfs2_holder *gh, *tmp;
@@ -260,10 +260,10 @@ restart:
 		if (may_grant(gl, gh)) {
 			if (gh->gh_list.prev == &gl->gl_holders &&
 			    glops->go_lock) {
-				spin_unlock(&gl->gl_spin);
+				spin_unlock(&gl->gl_lockref.lock);
 				/* FIXME: eliminate this eventually */
 				ret = glops->go_lock(gh);
-				spin_lock(&gl->gl_spin);
+				spin_lock(&gl->gl_lockref.lock);
 				if (ret) {
 					if (ret == 1)
 						return 2;
@@ -361,7 +361,7 @@ static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
 	unsigned state = ret & LM_OUT_ST_MASK;
 	int rv;
 
-	spin_lock(&gl->gl_spin);
+	spin_lock(&gl->gl_lockref.lock);
 	trace_gfs2_glock_state_change(gl, state);
 	state_change(gl, state);
 	gh = find_first_waiter(gl);
@@ -405,7 +405,7 @@ retry:
 			pr_err("wanted %u got %u\n", gl->gl_target, state);
 			GLOCK_BUG_ON(gl, 1);
 		}
-		spin_unlock(&gl->gl_spin);
+		spin_unlock(&gl->gl_lockref.lock);
 		return;
 	}
 
@@ -414,9 +414,9 @@ retry:
 	gfs2_demote_wake(gl);
 	if (state != LM_ST_UNLOCKED) {
 		if (glops->go_xmote_bh) {
-			spin_unlock(&gl->gl_spin);
+			spin_unlock(&gl->gl_lockref.lock);
 			rv = glops->go_xmote_bh(gl, gh);
-			spin_lock(&gl->gl_spin);
+			spin_lock(&gl->gl_lockref.lock);
 			if (rv) {
 				do_error(gl, rv);
 				goto out;
@@ -429,7 +429,7 @@ retry:
 out:
 	clear_bit(GLF_LOCK, &gl->gl_flags);
 out_locked:
-	spin_unlock(&gl->gl_spin);
+	spin_unlock(&gl->gl_lockref.lock);
 }
 
 /**
@@ -441,8 +441,8 @@ out_locked:
  */
 
 static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target)
-__releases(&gl->gl_spin)
-__acquires(&gl->gl_spin)
+__releases(&gl->gl_lockref.lock)
+__acquires(&gl->gl_lockref.lock)
 {
 	const struct gfs2_glock_operations *glops = gl->gl_ops;
 	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
@@ -464,7 +464,7 @@ __acquires(&gl->gl_spin)
 	    (gl->gl_state == LM_ST_EXCLUSIVE) ||
 	    (lck_flags & (LM_FLAG_TRY|LM_FLAG_TRY_1CB)))
 		clear_bit(GLF_BLOCKING, &gl->gl_flags);
-	spin_unlock(&gl->gl_spin);
+	spin_unlock(&gl->gl_lockref.lock);
 	if (glops->go_sync)
 		glops->go_sync(gl);
 	if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
@@ -485,7 +485,7 @@ __acquires(&gl->gl_spin)
 		gfs2_glock_put(gl);
 	}
 
-	spin_lock(&gl->gl_spin);
+	spin_lock(&gl->gl_lockref.lock);
 }
 
 /**
@@ -513,8 +513,8 @@ static inline struct gfs2_holder *find_first_holder(const struct gfs2_glock *gl)
  */
 
 static void run_queue(struct gfs2_glock *gl, const int nonblock)
-__releases(&gl->gl_spin)
-__acquires(&gl->gl_spin)
+__releases(&gl->gl_lockref.lock)
+__acquires(&gl->gl_lockref.lock)
 {
 	struct gfs2_holder *gh = NULL;
 	int ret;
@@ -596,7 +596,7 @@ static void glock_work_func(struct work_struct *work)
 		finish_xmote(gl, gl->gl_reply);
 		drop_ref = 1;
 	}
-	spin_lock(&gl->gl_spin);
+	spin_lock(&gl->gl_lockref.lock);
 	if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
 	    gl->gl_state != LM_ST_UNLOCKED &&
 	    gl->gl_demote_state != LM_ST_EXCLUSIVE) {
@@ -612,7 +612,7 @@ static void glock_work_func(struct work_struct *work)
 		}
 	}
 	run_queue(gl, 0);
-	spin_unlock(&gl->gl_spin);
+	spin_unlock(&gl->gl_lockref.lock);
 	if (!delay)
 		gfs2_glock_put(gl);
 	else {
@@ -876,8 +876,8 @@ void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...)
  */
 
 static inline void add_to_queue(struct gfs2_holder *gh)
-__releases(&gl->gl_spin)
-__acquires(&gl->gl_spin)
+__releases(&gl->gl_lockref.lock)
+__acquires(&gl->gl_lockref.lock)
 {
 	struct gfs2_glock *gl = gh->gh_gl;
 	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
@@ -926,10 +926,10 @@ fail:
 do_cancel:
 	gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
 	if (!(gh->gh_flags & LM_FLAG_PRIORITY)) {
-		spin_unlock(&gl->gl_spin);
+		spin_unlock(&gl->gl_lockref.lock);
 		if (sdp->sd_lockstruct.ls_ops->lm_cancel)
 			sdp->sd_lockstruct.ls_ops->lm_cancel(gl);
-		spin_lock(&gl->gl_spin);
+		spin_lock(&gl->gl_lockref.lock);
 	}
 	return;
 
@@ -967,7 +967,7 @@ int gfs2_glock_nq(struct gfs2_holder *gh)
 	if (test_bit(GLF_LRU, &gl->gl_flags))
 		gfs2_glock_remove_from_lru(gl);
 
-	spin_lock(&gl->gl_spin);
+	spin_lock(&gl->gl_lockref.lock);
 	add_to_queue(gh);
 	if (unlikely((LM_FLAG_NOEXP & gh->gh_flags) &&
 		     test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))) {
@@ -977,7 +977,7 @@ int gfs2_glock_nq(struct gfs2_holder *gh)
 		gl->gl_lockref.count--;
 	}
 	run_queue(gl, 1);
-	spin_unlock(&gl->gl_spin);
+	spin_unlock(&gl->gl_lockref.lock);
 
 	if (!(gh->gh_flags & GL_ASYNC))
 		error = gfs2_glock_wait(gh);
@@ -1010,7 +1010,7 @@ void gfs2_glock_dq(struct gfs2_holder *gh)
 	unsigned delay = 0;
 	int fast_path = 0;
 
-	spin_lock(&gl->gl_spin);
+	spin_lock(&gl->gl_lockref.lock);
 	if (gh->gh_flags & GL_NOCACHE)
 		handle_callback(gl, LM_ST_UNLOCKED, 0, false);
 
@@ -1018,9 +1018,9 @@ void gfs2_glock_dq(struct gfs2_holder *gh)
 	if (find_first_holder(gl) == NULL) {
 		if (glops->go_unlock) {
 			GLOCK_BUG_ON(gl, test_and_set_bit(GLF_LOCK, &gl->gl_flags));
-			spin_unlock(&gl->gl_spin);
+			spin_unlock(&gl->gl_lockref.lock);
 			glops->go_unlock(gh);
-			spin_lock(&gl->gl_spin);
+			spin_lock(&gl->gl_lockref.lock);
 			clear_bit(GLF_LOCK, &gl->gl_flags);
 		}
 		if (list_empty(&gl->gl_holders) &&
@@ -1033,7 +1033,7 @@ void gfs2_glock_dq(struct gfs2_holder *gh)
 		gfs2_glock_add_to_lru(gl);
 
 	trace_gfs2_glock_queue(gh, 0);
-	spin_unlock(&gl->gl_spin);
+	spin_unlock(&gl->gl_lockref.lock);
 	if (likely(fast_path))
 		return;
 
@@ -1217,9 +1217,9 @@ void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state)
 		delay = gl->gl_hold_time;
 	}
 
-	spin_lock(&gl->gl_spin);
+	spin_lock(&gl->gl_lockref.lock);
 	handle_callback(gl, state, delay, true);
-	spin_unlock(&gl->gl_spin);
+	spin_unlock(&gl->gl_lockref.lock);
 	if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
 		gfs2_glock_put(gl);
 }
@@ -1259,7 +1259,7 @@ static int gfs2_should_freeze(const struct gfs2_glock *gl)
  * @gl: Pointer to the glock
  * @ret: The return value from the dlm
  *
- * The gl_reply field is under the gl_spin lock so that it is ok
+ * The gl_reply field is under the gl_lockref.lock lock so that it is ok
  * to use a bitfield shared with other glock state fields.
  */
 
@@ -1267,20 +1267,20 @@ void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
 {
 	struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct;
 
-	spin_lock(&gl->gl_spin);
+	spin_lock(&gl->gl_lockref.lock);
 	gl->gl_reply = ret;
 
 	if (unlikely(test_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags))) {
 		if (gfs2_should_freeze(gl)) {
 			set_bit(GLF_FROZEN, &gl->gl_flags);
-			spin_unlock(&gl->gl_spin);
+			spin_unlock(&gl->gl_lockref.lock);
 			return;
 		}
 	}
 
 	gl->gl_lockref.count++;
 	set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
-	spin_unlock(&gl->gl_spin);
+	spin_unlock(&gl->gl_lockref.lock);
 
 	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
 		gfs2_glock_put(gl);
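
gfs2_glock_complete() above writes gl->gl_reply under gl_lockref.lock because,
as its kernel-doc comment notes, gl_reply shares a word with the
gl_state/gl_target/gl_demote_state bitfields, and a store to any bitfield
member is a read-modify-write of the whole containing word. A simplified
illustration of the hazard (this struct is illustrative, not the GFS2 layout):

struct state_word {
	unsigned int state:2;
	unsigned int target:2;
	unsigned int reply:8;	/* storing this rewrites state and target too */
};

/* Unlocked, two CPUs doing w->reply = r and w->state = s can each load
 * the old word, merge in their own field, and store back, losing the
 * other update. Writing gl->gl_reply under gl_lockref.lock, as the
 * function above does, rules that out. */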
@@ -1326,14 +1326,14 @@ __acquires(&lru_lock)
 	while(!list_empty(list)) {
 		gl = list_entry(list->next, struct gfs2_glock, gl_lru);
 		list_del_init(&gl->gl_lru);
-		if (!spin_trylock(&gl->gl_spin)) {
+		if (!spin_trylock(&gl->gl_lockref.lock)) {
 add_back_to_lru:
 			list_add(&gl->gl_lru, &lru_list);
 			atomic_inc(&lru_count);
 			continue;
 		}
 		if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
-			spin_unlock(&gl->gl_spin);
+			spin_unlock(&gl->gl_lockref.lock);
 			goto add_back_to_lru;
 		}
 		clear_bit(GLF_LRU, &gl->gl_flags);
@@ -1343,7 +1343,7 @@ add_back_to_lru:
 		WARN_ON(!test_and_clear_bit(GLF_LOCK, &gl->gl_flags));
 		if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
 			gl->gl_lockref.count--;
-		spin_unlock(&gl->gl_spin);
+		spin_unlock(&gl->gl_lockref.lock);
 		cond_resched_lock(&lru_lock);
 	}
 }
@@ -1461,10 +1461,10 @@ static void clear_glock(struct gfs2_glock *gl)
 {
 	gfs2_glock_remove_from_lru(gl);
 
-	spin_lock(&gl->gl_spin);
+	spin_lock(&gl->gl_lockref.lock);
 	if (gl->gl_state != LM_ST_UNLOCKED)
 		handle_callback(gl, LM_ST_UNLOCKED, 0, false);
-	spin_unlock(&gl->gl_spin);
+	spin_unlock(&gl->gl_lockref.lock);
 	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
 		gfs2_glock_put(gl);
 }
@@ -1482,9 +1482,9 @@ void gfs2_glock_thaw(struct gfs2_sbd *sdp)
 
 static void dump_glock(struct seq_file *seq, struct gfs2_glock *gl)
 {
-	spin_lock(&gl->gl_spin);
+	spin_lock(&gl->gl_lockref.lock);
 	gfs2_dump_glock(seq, gl);
-	spin_unlock(&gl->gl_spin);
+	spin_unlock(&gl->gl_lockref.lock);
 }
 
 static void dump_glock_func(struct gfs2_glock *gl)
@@ -1518,10 +1518,10 @@ void gfs2_glock_finish_truncate(struct gfs2_inode *ip)
 	ret = gfs2_truncatei_resume(ip);
 	gfs2_assert_withdraw(gl->gl_name.ln_sbd, ret == 0);
 
-	spin_lock(&gl->gl_spin);
+	spin_lock(&gl->gl_lockref.lock);
 	clear_bit(GLF_LOCK, &gl->gl_flags);
 	run_queue(gl, 1);
-	spin_unlock(&gl->gl_spin);
+	spin_unlock(&gl->gl_lockref.lock);
 }
 
 static const char *state2str(unsigned state)
diff --git a/fs/gfs2/glock.h b/fs/gfs2/glock.h
index 32572f71f027..f7cdaa8b4c83 100644
--- a/fs/gfs2/glock.h
+++ b/fs/gfs2/glock.h
@@ -141,7 +141,7 @@ static inline struct gfs2_holder *gfs2_glock_is_locked_by_me(struct gfs2_glock *
 	struct pid *pid;
 
 	/* Look in glock's list of holders for one with current task as owner */
-	spin_lock(&gl->gl_spin);
+	spin_lock(&gl->gl_lockref.lock);
 	pid = task_pid(current);
 	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
 		if (!test_bit(HIF_HOLDER, &gh->gh_iflags))
@@ -151,7 +151,7 @@ static inline struct gfs2_holder *gfs2_glock_is_locked_by_me(struct gfs2_glock *
 	}
 	gh = NULL;
 out:
-	spin_unlock(&gl->gl_spin);
+	spin_unlock(&gl->gl_lockref.lock);
 
 	return gh;
 }
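
gfs2_glock_is_locked_by_me() above is a typical reader of the holders list: it
scans gl_holders under gl_lockref.lock and returns the granted holder owned by
the current task, or NULL. A hedged usage sketch (the assertion helper below
is illustrative, not a GFS2 function):

static inline void assert_glock_held_by_me(struct gfs2_glock *gl)
{
	/* NULL means the current task holds no granted holder on gl */
	BUG_ON(gfs2_glock_is_locked_by_me(gl) == NULL);
}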
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index 1f6c9c3fe5cb..f348cfb6b69a 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -146,11 +146,11 @@ static void rgrp_go_sync(struct gfs2_glock *gl)
 	struct gfs2_rgrpd *rgd;
 	int error;
 
-	spin_lock(&gl->gl_spin);
+	spin_lock(&gl->gl_lockref.lock);
 	rgd = gl->gl_object;
 	if (rgd)
 		gfs2_rgrp_brelse(rgd);
-	spin_unlock(&gl->gl_spin);
+	spin_unlock(&gl->gl_lockref.lock);
 
 	if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
 		return;
@@ -162,11 +162,11 @@ static void rgrp_go_sync(struct gfs2_glock *gl)
 	mapping_set_error(mapping, error);
 	gfs2_ail_empty_gl(gl);
 
-	spin_lock(&gl->gl_spin);
+	spin_lock(&gl->gl_lockref.lock);
 	rgd = gl->gl_object;
 	if (rgd)
 		gfs2_free_clones(rgd);
-	spin_unlock(&gl->gl_spin);
+	spin_unlock(&gl->gl_lockref.lock);
 }
 
 /**
@@ -542,7 +542,7 @@ static int freeze_go_demote_ok(const struct gfs2_glock *gl)
  * iopen_go_callback - schedule the dcache entry for the inode to be deleted
  * @gl: the glock
  *
- * gl_spin lock is held while calling this
+ * gl_lockref.lock lock is held while calling this
  */
 static void iopen_go_callback(struct gfs2_glock *gl, bool remote)
 {
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index 121ed08d9d9f..de7b4f97ac75 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -334,9 +334,8 @@ struct gfs2_glock {
 	struct lm_lockname gl_name;
 
 	struct lockref gl_lockref;
-#define gl_spin gl_lockref.lock
 
-	/* State fields protected by gl_spin */
+	/* State fields protected by gl_lockref.lock */
 	unsigned int gl_state:2,	/* Current state */
 		     gl_target:2,	/* Target state */
 		     gl_demote_state:2,	/* State requested by remote node */
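
Dropping the #define alias is safe because struct lockref already couples the
spinlock to the reference count. Paraphrased from include/linux/lockref.h of
this era (shown for context; see the header for the exact configuration
guards):

struct lockref {
	union {
#if USE_CMPXCHG_LOCKREF
		aligned_u64 lock_count;	/* lock and count in one 64-bit word */
#endif
		struct {
			spinlock_t lock;
			unsigned int count;
		};
	};
};

Where a 64-bit cmpxchg is available, lockref_get() and lockref_put_or_lock()
can update count locklessly; code that touches count directly, as the glock
code does with gl->gl_lockref.count++, must hold gl_lockref.lock, which is
exactly the spinlock protecting the rest of the glock state.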
diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c
index 241a399bf83d..fb2b42cf46b5 100644
--- a/fs/gfs2/main.c
+++ b/fs/gfs2/main.c
@@ -50,7 +50,7 @@ static void gfs2_init_glock_once(void *foo)
 	struct gfs2_glock *gl = foo;
 
 	INIT_HLIST_BL_NODE(&gl->gl_list);
-	spin_lock_init(&gl->gl_spin);
+	spin_lock_init(&gl->gl_lockref.lock);
 	INIT_LIST_HEAD(&gl->gl_holders);
 	INIT_LIST_HEAD(&gl->gl_lru);
 	INIT_LIST_HEAD(&gl->gl_ail_list);
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index 475985d14758..d29dd0cec914 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -729,9 +729,9 @@ void gfs2_clear_rgrpd(struct gfs2_sbd *sdp)
 		rb_erase(n, &sdp->sd_rindex_tree);
 
 		if (gl) {
-			spin_lock(&gl->gl_spin);
+			spin_lock(&gl->gl_lockref.lock);
 			gl->gl_object = NULL;
-			spin_unlock(&gl->gl_spin);
+			spin_unlock(&gl->gl_lockref.lock);
 			gfs2_glock_add_to_lru(gl);
 			gfs2_glock_put(gl);
 		}