Diffstat (limited to 'fs/gfs2/glock.c')
-rw-r--r--  fs/gfs2/glock.c | 105
1 files changed, 64 insertions, 41 deletions
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 0898f3ec8212..9adf8f924e08 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -328,6 +328,30 @@ static void gfs2_holder_wake(struct gfs2_holder *gh)
 }
 
 /**
+ * do_error - Something unexpected has happened during a lock request
+ *
+ */
+
+static inline void do_error(struct gfs2_glock *gl, const int ret)
+{
+	struct gfs2_holder *gh, *tmp;
+
+	list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
+		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
+			continue;
+		if (ret & LM_OUT_ERROR)
+			gh->gh_error = -EIO;
+		else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))
+			gh->gh_error = GLR_TRYFAILED;
+		else
+			continue;
+		list_del_init(&gh->gh_list);
+		trace_gfs2_glock_queue(gh, 0);
+		gfs2_holder_wake(gh);
+	}
+}
+
+/**
  * do_promote - promote as many requests as possible on the current queue
  * @gl: The glock
  *
@@ -375,36 +399,13 @@ restart:
 		}
 		if (gh->gh_list.prev == &gl->gl_holders)
 			return 1;
+		do_error(gl, 0);
 		break;
 	}
 	return 0;
 }
 
 /**
- * do_error - Something unexpected has happened during a lock request
- *
- */
-
-static inline void do_error(struct gfs2_glock *gl, const int ret)
-{
-	struct gfs2_holder *gh, *tmp;
-
-	list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
-		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
-			continue;
-		if (ret & LM_OUT_ERROR)
-			gh->gh_error = -EIO;
-		else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))
-			gh->gh_error = GLR_TRYFAILED;
-		else
-			continue;
-		list_del_init(&gh->gh_list);
-		trace_gfs2_glock_queue(gh, 0);
-		gfs2_holder_wake(gh);
-	}
-}
-
-/**
  * find_first_waiter - find the first gh that's waiting for the glock
  * @gl: the glock
  */
@@ -706,18 +707,8 @@ static void glock_work_func(struct work_struct *work)
 {
 	unsigned long delay = 0;
 	struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work);
-	struct gfs2_holder *gh;
 	int drop_ref = 0;
 
-	if (unlikely(test_bit(GLF_FROZEN, &gl->gl_flags))) {
-		spin_lock(&gl->gl_spin);
-		gh = find_first_waiter(gl);
-		if (gh && (gh->gh_flags & LM_FLAG_NOEXP) &&
-		    test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))
-			set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
-		spin_unlock(&gl->gl_spin);
-	}
-
 	if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags)) {
 		finish_xmote(gl, gl->gl_reply);
 		drop_ref = 1;
@@ -1072,6 +1063,9 @@ int gfs2_glock_nq(struct gfs2_holder *gh)
 
 	spin_lock(&gl->gl_spin);
 	add_to_queue(gh);
+	if ((LM_FLAG_NOEXP & gh->gh_flags) &&
+	    test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))
+		set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
 	run_queue(gl, 1);
 	spin_unlock(&gl->gl_spin);
 
@@ -1329,6 +1323,36 @@ void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state)
 }
 
 /**
+ * gfs2_should_freeze - Figure out if glock should be frozen
+ * @gl: The glock in question
+ *
+ * Glocks are not frozen if (a) the result of the dlm operation is
+ * an error, (b) the locking operation was an unlock operation or
+ * (c) if there is a "noexp" flagged request anywhere in the queue
+ *
+ * Returns: 1 if freezing should occur, 0 otherwise
+ */
+
+static int gfs2_should_freeze(const struct gfs2_glock *gl)
+{
+	const struct gfs2_holder *gh;
+
+	if (gl->gl_reply & ~LM_OUT_ST_MASK)
+		return 0;
+	if (gl->gl_target == LM_ST_UNLOCKED)
+		return 0;
+
+	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
+		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
+			continue;
+		if (LM_FLAG_NOEXP & gh->gh_flags)
+			return 0;
+	}
+
+	return 1;
+}
+
+/**
  * gfs2_glock_complete - Callback used by locking
  * @gl: Pointer to the glock
  * @ret: The return value from the dlm
@@ -1338,18 +1362,17 @@ void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state)
 void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
 {
 	struct lm_lockstruct *ls = &gl->gl_sbd->sd_lockstruct;
+
 	gl->gl_reply = ret;
+
 	if (unlikely(test_bit(DFL_BLOCK_LOCKS, &ls->ls_flags))) {
-		struct gfs2_holder *gh;
 		spin_lock(&gl->gl_spin);
-		gh = find_first_waiter(gl);
-		if ((!(gh && (gh->gh_flags & LM_FLAG_NOEXP)) &&
-		     (gl->gl_target != LM_ST_UNLOCKED)) ||
-		    ((ret & ~LM_OUT_ST_MASK) != 0))
+		if (gfs2_should_freeze(gl)) {
 			set_bit(GLF_FROZEN, &gl->gl_flags);
-		spin_unlock(&gl->gl_spin);
-		if (test_bit(GLF_FROZEN, &gl->gl_flags))
+			spin_unlock(&gl->gl_spin);
 			return;
+		}
+		spin_unlock(&gl->gl_spin);
 	}
 	set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
 	gfs2_glock_hold(gl);
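
For readers tracing the new logic outside the kernel tree, the following is a minimal user-space sketch (not part of the patch) that models the decision gfs2_should_freeze() makes: a dlm reply carrying bits outside the state mask, an unlock target, or any waiting "noexp" request prevents freezing. The flag values and the simplified structures below are illustrative placeholders, not the real kernel definitions.

#include <stdio.h>

/* Illustrative placeholder values -- the real masks and flags live in the
 * gfs2/dlm headers and are not reproduced here. */
#define LM_OUT_ST_MASK	0x0000000F	/* placeholder: bits encoding the granted state */
#define LM_ST_UNLOCKED	0		/* placeholder: "unlocked" target state */
#define LM_FLAG_NOEXP	0x00000004	/* placeholder: holder flag, "don't wait for recovery" */

struct waiter {
	unsigned gh_flags;	/* holder flags, e.g. LM_FLAG_NOEXP */
	int	 is_holder;	/* models test_bit(HIF_HOLDER, ...) */
};

/* Models the three conditions documented above gfs2_should_freeze():
 * no freeze on (a) a dlm error, (b) an unlock, or (c) a queued noexp request. */
static int should_freeze(int reply, int target,
			 const struct waiter *waiters, int nwaiters)
{
	int i;

	if (reply & ~LM_OUT_ST_MASK)		/* (a) reply carries an error */
		return 0;
	if (target == LM_ST_UNLOCKED)		/* (b) this was an unlock */
		return 0;
	for (i = 0; i < nwaiters; i++) {	/* (c) scan the waiting (non-holder) requests */
		if (waiters[i].is_holder)
			continue;
		if (waiters[i].gh_flags & LM_FLAG_NOEXP)
			return 0;
	}
	return 1;
}

int main(void)
{
	struct waiter queue[] = {
		{ .gh_flags = 0,             .is_holder = 1 },
		{ .gh_flags = LM_FLAG_NOEXP, .is_holder = 0 },
	};

	/* Clean reply and non-unlock target, but a noexp waiter is queued: no freeze. */
	printf("freeze? %d\n", should_freeze(0x1, 0x2, queue, 2));
	/* Same reply and target without the noexp waiter: freeze. */
	printf("freeze? %d\n", should_freeze(0x1, 0x2, queue, 1));
	return 0;
}

In the patch itself this check runs under gl_spin from gfs2_glock_complete(), and a later gfs2_glock_nq() carrying LM_FLAG_NOEXP clears GLF_FROZEN and sets GLF_REPLY_PENDING so the frozen reply is processed.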