author	Christoph Hellwig <hch@lst.de>	2018-03-14 02:15:29 -0400
committer	Darrick J. Wong <darrick.wong@oracle.com>	2018-03-14 14:12:52 -0400
commit	e6b965705685c7fc2f24a68410529f86c08c7277
tree	0e34740ae42c3c3fc783a5a5d95e2094783e4640
parent	656de4ffaffd921e1b45de4150c86ba50da135e9
xfs: refactor xfs_log_force
Streamline the conditionals so that it is more obvious which specific case
from the top of the function comment is being handled. Use gotos only
for early returns.
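
The shape of the change is the common kernel pattern of a flat
if/else-if chain for the main cases plus shared unlock/error labels at
the end of the function. As a minimal sketch of that pattern only (the
demo_log/demo_force names here are hypothetical and not part of this
patch; the real logic is in the diff below):

	#include <linux/errno.h>
	#include <linux/spinlock.h>

	struct demo_log {
		spinlock_t	lock;		/* protects the fields below */
		bool		ioerror;	/* log has been shut down */
		bool		dirty;		/* there is something to flush */
	};

	static int
	demo_force(struct demo_log *log)
	{
		spin_lock(&log->lock);

		/* gotos are used only for early returns ... */
		if (log->ioerror)
			goto out_error;
		if (!log->dirty)
			goto out_unlock;

		/*
		 * ... while the main cases are handled in one flat
		 * if/else-if chain, one branch per documented case.
		 */

		spin_unlock(&log->lock);
		return 0;

	out_unlock:
		spin_unlock(&log->lock);
		return 0;
	out_error:
		spin_unlock(&log->lock);
		return -EIO;
	}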
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>
Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
-rw-r--r--	fs/xfs/xfs_log.c	144
1 file changed, 63 insertions(+), 81 deletions(-)
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index cb50dd72a3f3..2a0f882e7f7e 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -3318,99 +3318,81 @@ xfs_log_force(
 	xlog_cil_force(log);
 
 	spin_lock(&log->l_icloglock);
-
 	iclog = log->l_iclog;
-	if (iclog->ic_state & XLOG_STATE_IOERROR) {
-		spin_unlock(&log->l_icloglock);
-		return -EIO;
-	}
+	if (iclog->ic_state & XLOG_STATE_IOERROR)
+		goto out_error;
 
-	/* If the head iclog is not active nor dirty, we just attach
-	 * ourselves to the head and go to sleep.
-	 */
-	if (iclog->ic_state == XLOG_STATE_ACTIVE ||
-	    iclog->ic_state == XLOG_STATE_DIRTY) {
+	if (iclog->ic_state == XLOG_STATE_DIRTY ||
+	    (iclog->ic_state == XLOG_STATE_ACTIVE &&
+	     atomic_read(&iclog->ic_refcnt) == 0 && iclog->ic_offset == 0)) {
 		/*
-		 * If the head is dirty or (active and empty), then
-		 * we need to look at the previous iclog. If the previous
-		 * iclog is active or dirty we are done. There is nothing
-		 * to sync out. Otherwise, we attach ourselves to the
+		 * If the head is dirty or (active and empty), then we need to
+		 * look at the previous iclog.
+		 *
+		 * If the previous iclog is active or dirty we are done. There
+		 * is nothing to sync out. Otherwise, we attach ourselves to the
 		 * previous iclog and go to sleep.
 		 */
-		if (iclog->ic_state == XLOG_STATE_DIRTY ||
-		    (atomic_read(&iclog->ic_refcnt) == 0
-		     && iclog->ic_offset == 0)) {
-			iclog = iclog->ic_prev;
-			if (iclog->ic_state == XLOG_STATE_ACTIVE ||
-			    iclog->ic_state == XLOG_STATE_DIRTY)
-				goto no_sleep;
-			else
-				goto maybe_sleep;
-		} else {
-			if (atomic_read(&iclog->ic_refcnt) == 0) {
-				/* We are the only one with access to this
-				 * iclog. Flush it out now. There should
-				 * be a roundoff of zero to show that someone
-				 * has already taken care of the roundoff from
-				 * the previous sync.
-				 */
-				atomic_inc(&iclog->ic_refcnt);
-				lsn = be64_to_cpu(iclog->ic_header.h_lsn);
-				xlog_state_switch_iclogs(log, iclog, 0);
-				spin_unlock(&log->l_icloglock);
-
-				if (xlog_state_release_iclog(log, iclog))
-					return -EIO;
+		iclog = iclog->ic_prev;
+		if (iclog->ic_state == XLOG_STATE_ACTIVE ||
+		    iclog->ic_state == XLOG_STATE_DIRTY)
+			goto out_unlock;
+	} else if (iclog->ic_state == XLOG_STATE_ACTIVE) {
+		if (atomic_read(&iclog->ic_refcnt) == 0) {
+			/*
+			 * We are the only one with access to this iclog.
+			 *
+			 * Flush it out now. There should be a roundoff of zero
+			 * to show that someone has already taken care of the
+			 * roundoff from the previous sync.
+			 */
+			atomic_inc(&iclog->ic_refcnt);
+			lsn = be64_to_cpu(iclog->ic_header.h_lsn);
+			xlog_state_switch_iclogs(log, iclog, 0);
+			spin_unlock(&log->l_icloglock);
 
-				spin_lock(&log->l_icloglock);
-				if (be64_to_cpu(iclog->ic_header.h_lsn) == lsn &&
-				    iclog->ic_state != XLOG_STATE_DIRTY)
-					goto maybe_sleep;
-				else
-					goto no_sleep;
-			} else {
-				/* Someone else is writing to this iclog.
-				 * Use its call to flush out the data. However,
-				 * the other thread may not force out this LR,
-				 * so we mark it WANT_SYNC.
-				 */
-				xlog_state_switch_iclogs(log, iclog, 0);
-				goto maybe_sleep;
-			}
-		}
-	}
+			if (xlog_state_release_iclog(log, iclog))
+				return -EIO;
 
-	/* By the time we come around again, the iclog could've been filled
-	 * which would give it another lsn. If we have a new lsn, just
-	 * return because the relevant data has been flushed.
-	 */
-maybe_sleep:
-	if (flags & XFS_LOG_SYNC) {
-		/*
-		 * We must check if we're shutting down here, before
-		 * we wait, while we're holding the l_icloglock.
-		 * Then we check again after waking up, in case our
-		 * sleep was disturbed by a bad news.
-		 */
-		if (iclog->ic_state & XLOG_STATE_IOERROR) {
-			spin_unlock(&log->l_icloglock);
-			return -EIO;
+			spin_lock(&log->l_icloglock);
+			if (be64_to_cpu(iclog->ic_header.h_lsn) != lsn ||
+			    iclog->ic_state == XLOG_STATE_DIRTY)
+				goto out_unlock;
+		} else {
+			/*
+			 * Someone else is writing to this iclog.
+			 *
+			 * Use its call to flush out the data. However, the
+			 * other thread may not force out this LR, so we mark
+			 * it WANT_SYNC.
+			 */
+			xlog_state_switch_iclogs(log, iclog, 0);
 		}
-		XFS_STATS_INC(mp, xs_log_force_sleep);
-		xlog_wait(&iclog->ic_force_wait, &log->l_icloglock);
+	} else {
 		/*
-		 * No need to grab the log lock here since we're
-		 * only deciding whether or not to return EIO
-		 * and the memory read should be atomic.
+		 * If the head iclog is not active nor dirty, we just attach
+		 * ourselves to the head and go to sleep if necessary.
		 */
-		if (iclog->ic_state & XLOG_STATE_IOERROR)
-			return -EIO;
-	} else {
-
-no_sleep:
-		spin_unlock(&log->l_icloglock);
+		;
 	}
+
+	if (!(flags & XFS_LOG_SYNC))
+		goto out_unlock;
+
+	if (iclog->ic_state & XLOG_STATE_IOERROR)
+		goto out_error;
+	XFS_STATS_INC(mp, xs_log_force_sleep);
+	xlog_wait(&iclog->ic_force_wait, &log->l_icloglock);
+	if (iclog->ic_state & XLOG_STATE_IOERROR)
+		return -EIO;
 	return 0;
+
+out_unlock:
+	spin_unlock(&log->l_icloglock);
+	return 0;
+out_error:
+	spin_unlock(&log->l_icloglock);
+	return -EIO;
 }
 
 /*