path: root/fs/gfs2
author     Steven Whitehouse <swhiteho@redhat.com>  2006-04-26 13:21:55 -0400
committer  Steven Whitehouse <swhiteho@redhat.com>  2006-04-26 13:21:55 -0400
commit     5965b1f4792a1a9364b4e1ed6be8778a50eb981b (patch)
tree       77773aca2301eb2e11dc455e7152823bf3bde361 /fs/gfs2
parent     3a2a9c96ac129d17aad1a5c46988ad28f72564b0 (diff)
[GFS2] Don't do recursive locking in glock layer
This patch changes the last user of recursive locking so that it no longer
needs this feature and removes it from the glock layer. This makes the glock
code a lot simpler and easier to understand. It's also a prerequisite to
adding support for the AOP_TRUNCATED_PAGE return code (or at least it is if
you don't want your brain to melt in the process).

I've left in a couple of checks just in case there is some place else in the
code which is still using this feature that I didn't spot yet, but they can
probably be removed long term.

Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
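To make the new rule concrete, here is a minimal userspace sketch (not kernel
code: the structs and helpers below are simplified stand-ins for gfs2_holder,
gfs2_glock and find_holder_by_owner()) of what the patch enforces — one holder
per owner per glock, with a same-owner duplicate treated as a caller bug
(BUG() in the kernel, assert() here) instead of being queued as a recursive
holder:

    /* Minimal sketch, not kernel code: simplified stand-ins throughout. */
    #include <assert.h>
    #include <stddef.h>
    #include <stdio.h>

    struct holder {
            int owner;              /* stand-in for gh->gh_owner */
            struct holder *next;    /* stand-in for the gh_list linkage */
    };

    struct glock {
            struct holder *holders; /* stand-in for gl->gl_holders */
    };

    /* stand-in for find_holder_by_owner() */
    static struct holder *find_holder_by_owner(struct glock *gl, int owner)
    {
            struct holder *gh;

            for (gh = gl->holders; gh != NULL; gh = gh->next)
                    if (gh->owner == owner)
                            return gh;
            return NULL;
    }

    /* post-patch behaviour of add_to_queue(): a same-owner duplicate is
     * a caller bug, not a recursive holder to be promoted later */
    static void add_to_queue(struct glock *gl, struct holder *gh)
    {
            assert(find_holder_by_owner(gl, gh->owner) == NULL);
            gh->next = gl->holders;
            gl->holders = gh;
    }

    int main(void)
    {
            struct glock gl = { NULL };
            struct holder a = { 1, NULL };

            add_to_queue(&gl, &a);
            printf("owner %d holds the glock\n", a.owner);
            /* calling add_to_queue(&gl, &a) again would trip the assert,
             * mirroring the BUG() added to the kernel's add_to_queue() */
            return 0;
    }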
Diffstat (limited to 'fs/gfs2')
-rw-r--r--  fs/gfs2/glock.c     162
-rw-r--r--  fs/gfs2/incore.h      1
-rw-r--r--  fs/gfs2/recovery.c   79
3 files changed, 46 insertions(+), 196 deletions(-)
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 32cc4005307d..0f317155915d 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -450,86 +450,6 @@ void gfs2_holder_put(struct gfs2_holder *gh)
 }
 
 /**
- * handle_recurse - put other holder structures (marked recursive)
- * into the holders list
- * @gh: the holder structure
- *
- */
-
-static void handle_recurse(struct gfs2_holder *gh)
-{
-        struct gfs2_glock *gl = gh->gh_gl;
-        struct gfs2_sbd *sdp = gl->gl_sbd;
-        struct gfs2_holder *tmp_gh, *safe;
-        int found = 0;
-
-        BUG_ON(!spin_is_locked(&gl->gl_spin));
-
-        printk(KERN_INFO "recursion %016llx, %u\n", gl->gl_name.ln_number,
-               gl->gl_name.ln_type);
-
-        if (gfs2_assert_warn(sdp, gh->gh_owner))
-                return;
-
-        list_for_each_entry_safe(tmp_gh, safe, &gl->gl_waiters3, gh_list) {
-                if (tmp_gh->gh_owner != gh->gh_owner)
-                        continue;
-
-                gfs2_assert_warn(sdp,
-                                 test_bit(HIF_RECURSE, &tmp_gh->gh_iflags));
-
-                list_move_tail(&tmp_gh->gh_list, &gl->gl_holders);
-                tmp_gh->gh_error = 0;
-                set_bit(HIF_HOLDER, &tmp_gh->gh_iflags);
-
-                complete(&tmp_gh->gh_wait);
-
-                found = 1;
-        }
-
-        gfs2_assert_warn(sdp, found);
-}
-
-/**
- * do_unrecurse - a recursive holder was just dropped of the waiters3 list
- * @gh: the holder
- *
- * If there is only one other recursive holder, clear its HIF_RECURSE bit.
- * If there is more than one, leave them alone.
- *
- */
-
-static void do_unrecurse(struct gfs2_holder *gh)
-{
-        struct gfs2_glock *gl = gh->gh_gl;
-        struct gfs2_sbd *sdp = gl->gl_sbd;
-        struct gfs2_holder *tmp_gh, *last_gh = NULL;
-        int found = 0;
-
-        BUG_ON(!spin_is_locked(&gl->gl_spin));
-
-        if (gfs2_assert_warn(sdp, gh->gh_owner))
-                return;
-
-        list_for_each_entry(tmp_gh, &gl->gl_waiters3, gh_list) {
-                if (tmp_gh->gh_owner != gh->gh_owner)
-                        continue;
-
-                gfs2_assert_warn(sdp,
-                                 test_bit(HIF_RECURSE, &tmp_gh->gh_iflags));
-
-                if (found)
-                        return;
-
-                found = 1;
-                last_gh = tmp_gh;
-        }
-
-        if (!gfs2_assert_warn(sdp, found))
-                clear_bit(HIF_RECURSE, &last_gh->gh_iflags);
-}
-
-/**
  * rq_mutex - process a mutex request in the queue
  * @gh: the glock holder
  *
@@ -562,7 +482,6 @@ static int rq_promote(struct gfs2_holder *gh)
         struct gfs2_glock *gl = gh->gh_gl;
         struct gfs2_sbd *sdp = gl->gl_sbd;
         struct gfs2_glock_operations *glops = gl->gl_ops;
-        int recurse;
 
         if (!relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
                 if (list_empty(&gl->gl_holders)) {
@@ -588,7 +507,6 @@ static int rq_promote(struct gfs2_holder *gh)
         if (list_empty(&gl->gl_holders)) {
                 set_bit(HIF_FIRST, &gh->gh_iflags);
                 set_bit(GLF_LOCK, &gl->gl_flags);
-                recurse = 0;
         } else {
                 struct gfs2_holder *next_gh;
                 if (gh->gh_flags & GL_LOCAL_EXCL)
@@ -597,16 +515,12 @@ static int rq_promote(struct gfs2_holder *gh)
                                       gh_list);
                 if (next_gh->gh_flags & GL_LOCAL_EXCL)
                         return 1;
-                recurse = test_bit(HIF_RECURSE, &gh->gh_iflags);
         }
 
         list_move_tail(&gh->gh_list, &gl->gl_holders);
         gh->gh_error = 0;
         set_bit(HIF_HOLDER, &gh->gh_iflags);
 
-        if (recurse)
-                handle_recurse(gh);
-
         complete(&gh->gh_wait);
 
         return 0;
@@ -897,8 +811,6 @@ static void xmote_bh(struct gfs2_glock *gl, unsigned int ret)
                 spin_lock(&gl->gl_spin);
                 list_del_init(&gh->gh_list);
                 gh->gh_error = -EIO;
-                if (test_bit(HIF_RECURSE, &gh->gh_iflags))
-                        do_unrecurse(gh);
                 spin_unlock(&gl->gl_spin);
 
         } else if (test_bit(HIF_DEMOTE, &gh->gh_iflags)) {
@@ -922,8 +834,6 @@ static void xmote_bh(struct gfs2_glock *gl, unsigned int ret)
                 spin_lock(&gl->gl_spin);
                 list_del_init(&gh->gh_list);
                 gh->gh_error = GLR_CANCELED;
-                if (test_bit(HIF_RECURSE, &gh->gh_iflags))
-                        do_unrecurse(gh);
                 spin_unlock(&gl->gl_spin);
 
         } else if (relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
@@ -941,8 +851,6 @@ static void xmote_bh(struct gfs2_glock *gl, unsigned int ret)
                 spin_lock(&gl->gl_spin);
                 list_del_init(&gh->gh_list);
                 gh->gh_error = GLR_TRYFAILED;
-                if (test_bit(HIF_RECURSE, &gh->gh_iflags))
-                        do_unrecurse(gh);
                 spin_unlock(&gl->gl_spin);
 
         } else {
@@ -1161,8 +1069,6 @@ static int glock_wait_internal(struct gfs2_holder *gh)
             !list_empty(&gh->gh_list)) {
                 list_del_init(&gh->gh_list);
                 gh->gh_error = GLR_TRYFAILED;
-                if (test_bit(HIF_RECURSE, &gh->gh_iflags))
-                        do_unrecurse(gh);
                 run_queue(gl);
                 spin_unlock(&gl->gl_spin);
                 return gh->gh_error;
@@ -1191,9 +1097,6 @@ static int glock_wait_internal(struct gfs2_holder *gh)
                 if (gh->gh_error) {
                         spin_lock(&gl->gl_spin);
                         list_del_init(&gh->gh_list);
-                        if (test_and_clear_bit(HIF_RECURSE,
-                                               &gh->gh_iflags))
-                                do_unrecurse(gh);
                         spin_unlock(&gl->gl_spin);
                 }
         }
@@ -1202,8 +1105,6 @@ static int glock_wait_internal(struct gfs2_holder *gh)
                 gl->gl_req_gh = NULL;
                 gl->gl_req_bh = NULL;
                 clear_bit(GLF_LOCK, &gl->gl_flags);
-                if (test_bit(HIF_RECURSE, &gh->gh_iflags))
-                        handle_recurse(gh);
                 run_queue(gl);
                 spin_unlock(&gl->gl_spin);
         }
@@ -1225,40 +1126,6 @@ find_holder_by_owner(struct list_head *head, struct task_struct *owner)
 }
 
 /**
- * recurse_check -
- *
- * Make sure the new holder is compatible with the pre-existing one.
- *
- */
-
-static int recurse_check(struct gfs2_holder *existing, struct gfs2_holder *new,
-                         unsigned int state)
-{
-        struct gfs2_sbd *sdp = existing->gh_gl->gl_sbd;
-
-        if (gfs2_assert_warn(sdp, (new->gh_flags & LM_FLAG_ANY) ||
-                                  !(existing->gh_flags & LM_FLAG_ANY)))
-                goto fail;
-
-        if (gfs2_assert_warn(sdp, (existing->gh_flags & GL_LOCAL_EXCL) ||
-                                  !(new->gh_flags & GL_LOCAL_EXCL)))
-                goto fail;
-
-        if (gfs2_assert_warn(sdp, relaxed_state_ok(state, new->gh_state,
-                                                   new->gh_flags)))
-                goto fail;
-
-        return 0;
-
-fail:
-        print_symbol(KERN_WARNING "GFS2: Existing holder from %s\n",
-                     existing->gh_ip);
-        print_symbol(KERN_WARNING "GFS2: New holder from %s\n", new->gh_ip);
-        set_bit(HIF_ABORTED, &new->gh_iflags);
-        return -EINVAL;
-}
-
-/**
  * add_to_queue - Add a holder to the wait queue (but look for recursion)
  * @gh: the holder structure to add
  *
@@ -1271,37 +1138,20 @@ static void add_to_queue(struct gfs2_holder *gh)
 
         BUG_ON(!gh->gh_owner);
 
-        if (!gh->gh_owner)
-                goto out;
-
         existing = find_holder_by_owner(&gl->gl_holders, gh->gh_owner);
         if (existing) {
-                if (recurse_check(existing, gh, gl->gl_state))
-                        return;
-
-                list_add_tail(&gh->gh_list, &gl->gl_holders);
-                set_bit(HIF_HOLDER, &gh->gh_iflags);
-
-                gh->gh_error = 0;
-                complete(&gh->gh_wait);
-
-                return;
+                print_symbol(KERN_WARNING "original: %s\n", existing->gh_ip);
+                print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip);
+                BUG();
         }
 
         existing = find_holder_by_owner(&gl->gl_waiters3, gh->gh_owner);
         if (existing) {
-                if (recurse_check(existing, gh, existing->gh_state))
-                        return;
-
-                set_bit(HIF_RECURSE, &gh->gh_iflags);
-                set_bit(HIF_RECURSE, &existing->gh_iflags);
-
-                list_add_tail(&gh->gh_list, &gl->gl_waiters3);
-
-                return;
+                print_symbol(KERN_WARNING "original: %s\n", existing->gh_ip);
+                print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip);
+                BUG();
         }
 
-out:
         if (gh->gh_flags & LM_FLAG_PRIORITY)
                 list_add(&gh->gh_list, &gl->gl_waiters3);
         else
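For reference, a sketch of the promotion logic that remains once the recursion
bookkeeping above is gone: rq_promote() now only decides whether the current
holder may join gl_holders, with no companion handle_recurse() pass. This is a
simplified userspace model, not the kernel function — GL_LOCAL_EXCL is the
only flag modelled and the holder/queue structures are stand-ins:

    /* Simplified model of the post-patch promotion decision. */
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    #define GL_LOCAL_EXCL 0x1       /* holder wants local exclusivity */

    struct holder {
            unsigned int flags;
            const char *name;
    };

    /* true = promoted to gl_holders, false = stays on the wait queue */
    static bool rq_promote(const struct holder *gh, const struct holder *first)
    {
            if (first != NULL &&
                ((gh->flags & GL_LOCAL_EXCL) || (first->flags & GL_LOCAL_EXCL)))
                    return false;
            /* the pre-patch code additionally computed `recurse` here and
             * called handle_recurse(gh) to promote sibling holders */
            printf("%s promoted\n", gh->name);
            return true;
    }

    int main(void)
    {
            struct holder a = { 0, "reader A" };
            struct holder b = { 0, "reader B" };
            struct holder c = { GL_LOCAL_EXCL, "exclusive C" };

            rq_promote(&a, NULL);           /* first holder: promoted */
            rq_promote(&b, &a);             /* compatible: promoted */
            if (!rq_promote(&c, &a))        /* conflicting: must wait */
                    printf("%s must wait\n", c.name);
            return 0;
    }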
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index 761f00153d43..84dd2f579e62 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -146,7 +146,6 @@ enum {
         HIF_DEALLOC = 5,
         HIF_HOLDER = 6,
         HIF_FIRST = 7,
-        HIF_RECURSE = 8,
         HIF_ABORTED = 9,
 };
 
diff --git a/fs/gfs2/recovery.c b/fs/gfs2/recovery.c
index e91c2bda6c32..7d467966f92c 100644
--- a/fs/gfs2/recovery.c
+++ b/fs/gfs2/recovery.c
@@ -436,30 +436,35 @@ int gfs2_recover_journal(struct gfs2_jdesc *jd)
         unsigned int pass;
         int error;
 
-        fs_info(sdp, "jid=%u: Trying to acquire journal lock...\n", jd->jd_jid);
-
-        /* Aquire the journal lock so we can do recovery */
-
-        error = gfs2_glock_nq_num(sdp, jd->jd_jid, &gfs2_journal_glops,
-                                  LM_ST_EXCLUSIVE,
-                                  LM_FLAG_NOEXP | LM_FLAG_TRY | GL_NOCACHE,
-                                  &j_gh);
-        switch (error) {
-        case 0:
-                break;
-
-        case GLR_TRYFAILED:
-                fs_info(sdp, "jid=%u: Busy\n", jd->jd_jid);
-                error = 0;
-
-        default:
-                goto fail;
-        };
-
-        error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED,
-                                   LM_FLAG_NOEXP, &ji_gh);
-        if (error)
-                goto fail_gunlock_j;
+        if (jd->jd_jid != sdp->sd_lockstruct.ls_jid) {
+                fs_info(sdp, "jid=%u: Trying to acquire journal lock...\n",
+                        jd->jd_jid);
+
+                /* Aquire the journal lock so we can do recovery */
+
+                error = gfs2_glock_nq_num(sdp, jd->jd_jid, &gfs2_journal_glops,
+                                          LM_ST_EXCLUSIVE,
+                                          LM_FLAG_NOEXP | LM_FLAG_TRY | GL_NOCACHE,
+                                          &j_gh);
+                switch (error) {
+                case 0:
+                        break;
+
+                case GLR_TRYFAILED:
+                        fs_info(sdp, "jid=%u: Busy\n", jd->jd_jid);
+                        error = 0;
+
+                default:
+                        goto fail;
+                };
+
+                error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED,
+                                           LM_FLAG_NOEXP, &ji_gh);
+                if (error)
+                        goto fail_gunlock_j;
+        } else {
+                fs_info(sdp, "jid=%u, already locked for use\n", jd->jd_jid);
+        }
 
         fs_info(sdp, "jid=%u: Looking at journal...\n", jd->jd_jid);
 
@@ -481,10 +486,8 @@ int gfs2_recover_journal(struct gfs2_jdesc *jd)
 
                 error = gfs2_glock_nq_init(sdp->sd_trans_gl,
                                            LM_ST_SHARED,
-                                           LM_FLAG_NOEXP |
-                                           LM_FLAG_PRIORITY |
-                                           GL_NEVER_RECURSE |
-                                           GL_NOCANCEL |
+                                           LM_FLAG_NOEXP | LM_FLAG_PRIORITY |
+                                           GL_NEVER_RECURSE | GL_NOCANCEL |
                                            GL_NOCACHE,
                                            &t_gh);
                 if (error)
@@ -521,37 +524,35 @@ int gfs2_recover_journal(struct gfs2_jdesc *jd)
                         goto fail_gunlock_tr;
 
                 gfs2_glock_dq_uninit(&t_gh);
-
                 t = DIV_ROUND_UP(jiffies - t, HZ);
-
                 fs_info(sdp, "jid=%u: Journal replayed in %lus\n",
                         jd->jd_jid, t);
         }
 
-        gfs2_glock_dq_uninit(&ji_gh);
+        if (jd->jd_jid != sdp->sd_lockstruct.ls_jid)
+                gfs2_glock_dq_uninit(&ji_gh);
 
         gfs2_lm_recovery_done(sdp, jd->jd_jid, LM_RD_SUCCESS);
 
-        gfs2_glock_dq_uninit(&j_gh);
+        if (jd->jd_jid != sdp->sd_lockstruct.ls_jid)
+                gfs2_glock_dq_uninit(&j_gh);
 
         fs_info(sdp, "jid=%u: Done\n", jd->jd_jid);
-
         return 0;
 
- fail_gunlock_tr:
+fail_gunlock_tr:
         gfs2_glock_dq_uninit(&t_gh);
-
- fail_gunlock_ji:
-        gfs2_glock_dq_uninit(&ji_gh);
-
- fail_gunlock_j:
-        gfs2_glock_dq_uninit(&j_gh);
+fail_gunlock_ji:
+        if (jd->jd_jid != sdp->sd_lockstruct.ls_jid) {
+                gfs2_glock_dq_uninit(&ji_gh);
+fail_gunlock_j:
+                gfs2_glock_dq_uninit(&j_gh);
+        }
 
         fs_info(sdp, "jid=%u: %s\n", jd->jd_jid, (error) ? "Failed" : "Done");
 
- fail:
+fail:
         gfs2_lm_recovery_done(sdp, jd->jd_jid, LM_RD_GAVEUP);
-
         return error;
 }
 
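The recovery.c hunks above are where the last recursive user went away: when a
node recovers its own journal (jd->jd_jid == sdp->sd_lockstruct.ls_jid) it
already holds the journal glock from mount, so gfs2_recover_journal() now
takes and releases the journal locks only for other nodes' journals. A minimal
userspace sketch of that guard pattern — lock_journal()/unlock_journal() are
invented helpers standing in for gfs2_glock_nq_num()/gfs2_glock_dq_uninit(),
not gfs2 API:

    /* Sketch of the "skip locking our own journal" guard, with
     * invented helpers; not kernel code. */
    #include <stdbool.h>
    #include <stdio.h>

    struct sbd { unsigned int our_jid; };   /* stand-in for sd_lockstruct.ls_jid */
    struct jdesc { unsigned int jid; };     /* stand-in for gfs2_jdesc */

    static void lock_journal(unsigned int jid)
    {
            printf("jid=%u: journal lock acquired\n", jid);
    }

    static void unlock_journal(unsigned int jid)
    {
            printf("jid=%u: journal lock released\n", jid);
    }

    static int recover_journal(const struct sbd *sdp, const struct jdesc *jd)
    {
            bool locked = false;

            if (jd->jid != sdp->our_jid) {
                    /* another node's journal: lock it for recovery */
                    lock_journal(jd->jid);
                    locked = true;
            } else {
                    /* our own journal was locked at mount time */
                    printf("jid=%u: already locked for use\n", jd->jid);
            }

            /* ... replay the journal here ... */

            if (locked)     /* mirrors the conditional unlocks in the patch */
                    unlock_journal(jd->jid);
            return 0;
    }

    int main(void)
    {
            struct sbd sdp = { 0 };
            struct jdesc mine = { 0 }, theirs = { 1 };

            recover_journal(&sdp, &mine);
            recover_journal(&sdp, &theirs);
            return 0;
    }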