Diffstat (limited to 'fs/gfs2/glock.c')

-rw-r--r--  fs/gfs2/glock.c | 162

1 file changed, 6 insertions(+), 156 deletions(-)
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 32cc4005307d..0f317155915d 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -450,86 +450,6 @@ void gfs2_holder_put(struct gfs2_holder *gh)
 }
 
 /**
- * handle_recurse - put other holder structures (marked recursive)
- *                  into the holders list
- * @gh: the holder structure
- *
- */
-
-static void handle_recurse(struct gfs2_holder *gh)
-{
-        struct gfs2_glock *gl = gh->gh_gl;
-        struct gfs2_sbd *sdp = gl->gl_sbd;
-        struct gfs2_holder *tmp_gh, *safe;
-        int found = 0;
-
-        BUG_ON(!spin_is_locked(&gl->gl_spin));
-
-        printk(KERN_INFO "recursion %016llx, %u\n", gl->gl_name.ln_number,
-               gl->gl_name.ln_type);
-
-        if (gfs2_assert_warn(sdp, gh->gh_owner))
-                return;
-
-        list_for_each_entry_safe(tmp_gh, safe, &gl->gl_waiters3, gh_list) {
-                if (tmp_gh->gh_owner != gh->gh_owner)
-                        continue;
-
-                gfs2_assert_warn(sdp,
-                                 test_bit(HIF_RECURSE, &tmp_gh->gh_iflags));
-
-                list_move_tail(&tmp_gh->gh_list, &gl->gl_holders);
-                tmp_gh->gh_error = 0;
-                set_bit(HIF_HOLDER, &tmp_gh->gh_iflags);
-
-                complete(&tmp_gh->gh_wait);
-
-                found = 1;
-        }
-
-        gfs2_assert_warn(sdp, found);
-}
-
-/**
- * do_unrecurse - a recursive holder was just dropped of the waiters3 list
- * @gh: the holder
- *
- * If there is only one other recursive holder, clear its HIF_RECURSE bit.
- * If there is more than one, leave them alone.
- *
- */
-
-static void do_unrecurse(struct gfs2_holder *gh)
-{
-        struct gfs2_glock *gl = gh->gh_gl;
-        struct gfs2_sbd *sdp = gl->gl_sbd;
-        struct gfs2_holder *tmp_gh, *last_gh = NULL;
-        int found = 0;
-
-        BUG_ON(!spin_is_locked(&gl->gl_spin));
-
-        if (gfs2_assert_warn(sdp, gh->gh_owner))
-                return;
-
-        list_for_each_entry(tmp_gh, &gl->gl_waiters3, gh_list) {
-                if (tmp_gh->gh_owner != gh->gh_owner)
-                        continue;
-
-                gfs2_assert_warn(sdp,
-                                 test_bit(HIF_RECURSE, &tmp_gh->gh_iflags));
-
-                if (found)
-                        return;
-
-                found = 1;
-                last_gh = tmp_gh;
-        }
-
-        if (!gfs2_assert_warn(sdp, found))
-                clear_bit(HIF_RECURSE, &last_gh->gh_iflags);
-}
-
-/**
  * rq_mutex - process a mutex request in the queue
  * @gh: the glock holder
  *
@@ -562,7 +482,6 @@ static int rq_promote(struct gfs2_holder *gh)
         struct gfs2_glock *gl = gh->gh_gl;
         struct gfs2_sbd *sdp = gl->gl_sbd;
         struct gfs2_glock_operations *glops = gl->gl_ops;
-        int recurse;
 
         if (!relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
                 if (list_empty(&gl->gl_holders)) {
@@ -588,7 +507,6 @@ static int rq_promote(struct gfs2_holder *gh)
         if (list_empty(&gl->gl_holders)) {
                 set_bit(HIF_FIRST, &gh->gh_iflags);
                 set_bit(GLF_LOCK, &gl->gl_flags);
-                recurse = 0;
         } else {
                 struct gfs2_holder *next_gh;
                 if (gh->gh_flags & GL_LOCAL_EXCL)
@@ -597,16 +515,12 @@ static int rq_promote(struct gfs2_holder *gh)
                                      gh_list);
                 if (next_gh->gh_flags & GL_LOCAL_EXCL)
                         return 1;
-                recurse = test_bit(HIF_RECURSE, &gh->gh_iflags);
         }
 
         list_move_tail(&gh->gh_list, &gl->gl_holders);
         gh->gh_error = 0;
         set_bit(HIF_HOLDER, &gh->gh_iflags);
 
-        if (recurse)
-                handle_recurse(gh);
-
         complete(&gh->gh_wait);
 
         return 0;
@@ -897,8 +811,6 @@ static void xmote_bh(struct gfs2_glock *gl, unsigned int ret)
                 spin_lock(&gl->gl_spin);
                 list_del_init(&gh->gh_list);
                 gh->gh_error = -EIO;
-                if (test_bit(HIF_RECURSE, &gh->gh_iflags))
-                        do_unrecurse(gh);
                 spin_unlock(&gl->gl_spin);
 
         } else if (test_bit(HIF_DEMOTE, &gh->gh_iflags)) {
@@ -922,8 +834,6 @@ static void xmote_bh(struct gfs2_glock *gl, unsigned int ret)
                 spin_lock(&gl->gl_spin);
                 list_del_init(&gh->gh_list);
                 gh->gh_error = GLR_CANCELED;
-                if (test_bit(HIF_RECURSE, &gh->gh_iflags))
-                        do_unrecurse(gh);
                 spin_unlock(&gl->gl_spin);
 
         } else if (relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
@@ -941,8 +851,6 @@ static void xmote_bh(struct gfs2_glock *gl, unsigned int ret)
                 spin_lock(&gl->gl_spin);
                 list_del_init(&gh->gh_list);
                 gh->gh_error = GLR_TRYFAILED;
-                if (test_bit(HIF_RECURSE, &gh->gh_iflags))
-                        do_unrecurse(gh);
                 spin_unlock(&gl->gl_spin);
 
         } else {
@@ -1161,8 +1069,6 @@ static int glock_wait_internal(struct gfs2_holder *gh)
             !list_empty(&gh->gh_list)) {
                 list_del_init(&gh->gh_list);
                 gh->gh_error = GLR_TRYFAILED;
-                if (test_bit(HIF_RECURSE, &gh->gh_iflags))
-                        do_unrecurse(gh);
                 run_queue(gl);
                 spin_unlock(&gl->gl_spin);
                 return gh->gh_error;
@@ -1191,9 +1097,6 @@ static int glock_wait_internal(struct gfs2_holder *gh)
                         if (gh->gh_error) {
                                 spin_lock(&gl->gl_spin);
                                 list_del_init(&gh->gh_list);
-                                if (test_and_clear_bit(HIF_RECURSE,
-                                                       &gh->gh_iflags))
-                                        do_unrecurse(gh);
                                 spin_unlock(&gl->gl_spin);
                         }
                 }
@@ -1202,8 +1105,6 @@ static int glock_wait_internal(struct gfs2_holder *gh)
                 gl->gl_req_gh = NULL;
                 gl->gl_req_bh = NULL;
                 clear_bit(GLF_LOCK, &gl->gl_flags);
-                if (test_bit(HIF_RECURSE, &gh->gh_iflags))
-                        handle_recurse(gh);
                 run_queue(gl);
                 spin_unlock(&gl->gl_spin);
         }
@@ -1225,40 +1126,6 @@ find_holder_by_owner(struct list_head *head, struct task_struct *owner)
 }
 
 /**
- * recurse_check -
- *
- * Make sure the new holder is compatible with the pre-existing one.
- *
- */
-
-static int recurse_check(struct gfs2_holder *existing, struct gfs2_holder *new,
-                         unsigned int state)
-{
-        struct gfs2_sbd *sdp = existing->gh_gl->gl_sbd;
-
-        if (gfs2_assert_warn(sdp, (new->gh_flags & LM_FLAG_ANY) ||
-                             !(existing->gh_flags & LM_FLAG_ANY)))
-                goto fail;
-
-        if (gfs2_assert_warn(sdp, (existing->gh_flags & GL_LOCAL_EXCL) ||
-                             !(new->gh_flags & GL_LOCAL_EXCL)))
-                goto fail;
-
-        if (gfs2_assert_warn(sdp, relaxed_state_ok(state, new->gh_state,
-                                                   new->gh_flags)))
-                goto fail;
-
-        return 0;
-
-fail:
-        print_symbol(KERN_WARNING "GFS2: Existing holder from %s\n",
-                     existing->gh_ip);
-        print_symbol(KERN_WARNING "GFS2: New holder from %s\n", new->gh_ip);
-        set_bit(HIF_ABORTED, &new->gh_iflags);
-        return -EINVAL;
-}
-
-/**
  * add_to_queue - Add a holder to the wait queue (but look for recursion)
  * @gh: the holder structure to add
  *
@@ -1271,37 +1138,20 @@ static void add_to_queue(struct gfs2_holder *gh)
 
         BUG_ON(!gh->gh_owner);
 
-        if (!gh->gh_owner)
-                goto out;
-
         existing = find_holder_by_owner(&gl->gl_holders, gh->gh_owner);
         if (existing) {
-                if (recurse_check(existing, gh, gl->gl_state))
-                        return;
-
-                list_add_tail(&gh->gh_list, &gl->gl_holders);
-                set_bit(HIF_HOLDER, &gh->gh_iflags);
-
-                gh->gh_error = 0;
-                complete(&gh->gh_wait);
-
-                return;
+                print_symbol(KERN_WARNING "original: %s\n", existing->gh_ip);
+                print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip);
+                BUG();
         }
 
         existing = find_holder_by_owner(&gl->gl_waiters3, gh->gh_owner);
         if (existing) {
-                if (recurse_check(existing, gh, existing->gh_state))
-                        return;
-
-                set_bit(HIF_RECURSE, &gh->gh_iflags);
-                set_bit(HIF_RECURSE, &existing->gh_iflags);
-
-                list_add_tail(&gh->gh_list, &gl->gl_waiters3);
-
-                return;
+                print_symbol(KERN_WARNING "original: %s\n", existing->gh_ip);
+                print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip);
+                BUG();
         }
 
-out:
         if (gh->gh_flags & LM_FLAG_PRIORITY)
                 list_add(&gh->gh_list, &gl->gl_waiters3);
         else
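For readers skimming the diff: the removed code let the task that already held (or was already queued on) a glock acquire it again, granting or flagging the second holder as recursive (HIF_RECURSE) and unwinding that state on every error path. The new code instead treats any repeat request from the same owner as a caller bug and trips BUG(). Below is a minimal user-space sketch of the new rule; the types and function names here (struct glock, struct holder, glock_add_to_queue) are simplified hypothetical stand-ins for illustration, not the kernel structures, and assert() stands in for BUG().

/*
 * Model of the post-patch add_to_queue() invariant: at most one
 * queued-or-granted holder per owner on a given glock. Stand-in
 * types only; not the GFS2 kernel API.
 */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

struct holder {
        int owner;             /* stands in for gh->gh_owner (a task pointer) */
        struct holder *next;   /* stands in for the gh_list linkage */
};

struct glock {
        struct holder *holders;   /* stands in for gl->gl_holders */
        struct holder *waiters;   /* stands in for gl->gl_waiters3 */
};

static struct holder *find_holder_by_owner(struct holder *head, int owner)
{
        for (; head; head = head->next)
                if (head->owner == owner)
                        return head;
        return NULL;
}

static void glock_add_to_queue(struct glock *gl, struct holder *gh)
{
        /* Post-patch rule: a repeat request from the same owner is a bug. */
        assert(!find_holder_by_owner(gl->holders, gh->owner));  /* was BUG() */
        assert(!find_holder_by_owner(gl->waiters, gh->owner));  /* was BUG() */

        /* Otherwise just queue the new request. */
        gh->next = gl->waiters;
        gl->waiters = gh;
}

int main(void)
{
        struct glock gl = { 0 };
        struct holder a = { .owner = 1 };
        struct holder b = { .owner = 2 };

        glock_add_to_queue(&gl, &a);   /* fine: first request from owner 1 */
        glock_add_to_queue(&gl, &b);   /* fine: different owner */
        /* A second holder with .owner = 1 would now trip the assertion,
         * just as the patched kernel code trips BUG(). */
        printf("both holders queued\n");
        return 0;
}

The design consequence is that callers may no longer rely on recursive glock acquisition; the burden of never re-requesting a held glock moves to the call sites, which is why the patch can delete handle_recurse(), do_unrecurse(), recurse_check(), and every HIF_RECURSE cleanup path in one go.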