author      Dave Chinner <dchinner@redhat.com>    2010-12-20 20:09:01 -0500
committer   Dave Chinner <david@fromorbit.com>    2010-12-20 20:09:01 -0500
commit      eb40a87500ac2f6be7eaf8ebb35610e6d0e60e9a (patch)
tree        3f04eefbbb1428faf83aae5e9e3c951ae2748aa3 /fs/xfs/xfs_log.c
parent      a69ed03c24d4a336c23b7116127713d5a8c5ac4d (diff)
xfs: use wait queues directly for the log wait queues
The log grant queues are one of the few places left using sv_t constructs
for waiting. Given we are touching this code, we should convert them to
plain wait queues.

While there, convert all the other sv_t users in the log code as well.
Seeing as this removes the last users of the sv_t type, remove the header
file defining the wrapper and the fragments that still reference it.

Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
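The sv_wait() calls below are replaced by xlog_wait(), a helper that is not
part of this file's diff (it presumably lives alongside the other log
definitions in fs/xfs/xfs_log_priv.h). A minimal sketch of such a
lock-dropping wait helper, paired with the wake_up()/wake_up_all() calls in
the diff, is shown here; the exclusive queueing and the __releases()
annotation are assumptions based on how sv_wait() was used with a held
spinlock, not a quote of the actual header change:

    #include <linux/wait.h>
    #include <linux/spinlock.h>
    #include <linux/sched.h>

    /*
     * Sketch: queue the current task on the wait queue, drop the lock that
     * protects the wait condition, and sleep until a wake_up()/wake_up_all()
     * on the same queue runs us. The caller must hold 'lock' on entry; it is
     * released before sleeping, so the caller re-takes it afterwards if the
     * condition needs to be re-checked.
     */
    static inline void
    xlog_wait(wait_queue_head_t *wq, spinlock_t *lock) __releases(lock)
    {
            DECLARE_WAITQUEUE(wait, current);

            add_wait_queue_exclusive(wq, &wait);
            __set_current_state(TASK_UNINTERRUPTIBLE);
            spin_unlock(lock);
            schedule();
            remove_wait_queue(wq, &wait);
    }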
Diffstat (limited to 'fs/xfs/xfs_log.c')
-rw-r--r--   fs/xfs/xfs_log.c | 64
1 file changed, 28 insertions(+), 36 deletions(-)
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 6bba8b4b8596..cc0504e0bb3b 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -547,8 +547,8 @@ xfs_log_unmount_write(xfs_mount_t *mp)
 		if (!(iclog->ic_state == XLOG_STATE_ACTIVE ||
 		      iclog->ic_state == XLOG_STATE_DIRTY)) {
 			if (!XLOG_FORCED_SHUTDOWN(log)) {
-				sv_wait(&iclog->ic_force_wait, PMEM,
-					&log->l_icloglock, s);
+				xlog_wait(&iclog->ic_force_wait,
+						&log->l_icloglock);
 			} else {
 				spin_unlock(&log->l_icloglock);
 			}
@@ -588,8 +588,8 @@ xfs_log_unmount_write(xfs_mount_t *mp)
 		   || iclog->ic_state == XLOG_STATE_DIRTY
 		   || iclog->ic_state == XLOG_STATE_IOERROR) ) {
 
-			sv_wait(&iclog->ic_force_wait, PMEM,
-				&log->l_icloglock, s);
+			xlog_wait(&iclog->ic_force_wait,
+					&log->l_icloglock);
 		} else {
 			spin_unlock(&log->l_icloglock);
 		}
@@ -700,7 +700,7 @@ xfs_log_move_tail(xfs_mount_t *mp,
 				break;
 			tail_lsn = 0;
 			free_bytes -= tic->t_unit_res;
-			sv_signal(&tic->t_wait);
+			wake_up(&tic->t_wait);
 		}
 	}
 
@@ -719,7 +719,7 @@ xfs_log_move_tail(xfs_mount_t *mp,
 				break;
 			tail_lsn = 0;
 			free_bytes -= need_bytes;
-			sv_signal(&tic->t_wait);
+			wake_up(&tic->t_wait);
 		}
 	}
 	spin_unlock(&log->l_grant_lock);
@@ -1060,7 +1060,7 @@ xlog_alloc_log(xfs_mount_t *mp,
 
 	spin_lock_init(&log->l_icloglock);
 	spin_lock_init(&log->l_grant_lock);
-	sv_init(&log->l_flush_wait, 0, "flush_wait");
+	init_waitqueue_head(&log->l_flush_wait);
 
 	/* log record size must be multiple of BBSIZE; see xlog_rec_header_t */
 	ASSERT((XFS_BUF_SIZE(bp) & BBMASK) == 0);
@@ -1116,8 +1116,8 @@ xlog_alloc_log(xfs_mount_t *mp,
 
 		ASSERT(XFS_BUF_ISBUSY(iclog->ic_bp));
 		ASSERT(XFS_BUF_VALUSEMA(iclog->ic_bp) <= 0);
-		sv_init(&iclog->ic_force_wait, SV_DEFAULT, "iclog-force");
-		sv_init(&iclog->ic_write_wait, SV_DEFAULT, "iclog-write");
+		init_waitqueue_head(&iclog->ic_force_wait);
+		init_waitqueue_head(&iclog->ic_write_wait);
 
 		iclogp = &iclog->ic_next;
 	}
@@ -1132,11 +1132,8 @@ xlog_alloc_log(xfs_mount_t *mp,
 out_free_iclog:
 	for (iclog = log->l_iclog; iclog; iclog = prev_iclog) {
 		prev_iclog = iclog->ic_next;
-		if (iclog->ic_bp) {
-			sv_destroy(&iclog->ic_force_wait);
-			sv_destroy(&iclog->ic_write_wait);
+		if (iclog->ic_bp)
 			xfs_buf_free(iclog->ic_bp);
-		}
 		kmem_free(iclog);
 	}
 	spinlock_destroy(&log->l_icloglock);
@@ -1453,8 +1450,6 @@ xlog_dealloc_log(xlog_t *log)
 
 	iclog = log->l_iclog;
 	for (i=0; i<log->l_iclog_bufs; i++) {
-		sv_destroy(&iclog->ic_force_wait);
-		sv_destroy(&iclog->ic_write_wait);
 		xfs_buf_free(iclog->ic_bp);
 		next_iclog = iclog->ic_next;
 		kmem_free(iclog);
@@ -2261,7 +2256,7 @@ xlog_state_do_callback(
 			xlog_state_clean_log(log);
 
 			/* wake up threads waiting in xfs_log_force() */
-			sv_broadcast(&iclog->ic_force_wait);
+			wake_up_all(&iclog->ic_force_wait);
 
 			iclog = iclog->ic_next;
 		} while (first_iclog != iclog);
@@ -2308,7 +2303,7 @@ xlog_state_do_callback(
 	spin_unlock(&log->l_icloglock);
 
 	if (wake)
-		sv_broadcast(&log->l_flush_wait);
+		wake_up_all(&log->l_flush_wait);
 }
 
 
@@ -2359,7 +2354,7 @@ xlog_state_done_syncing(
 	 * iclog buffer, we wake them all, one will get to do the
 	 * I/O, the others get to wait for the result.
 	 */
-	sv_broadcast(&iclog->ic_write_wait);
+	wake_up_all(&iclog->ic_write_wait);
 	spin_unlock(&log->l_icloglock);
 	xlog_state_do_callback(log, aborted, iclog);	/* also cleans log */
 }	/* xlog_state_done_syncing */
@@ -2408,7 +2403,7 @@ restart:
 		XFS_STATS_INC(xs_log_noiclogs);
 
 		/* Wait for log writes to have flushed */
-		sv_wait(&log->l_flush_wait, 0, &log->l_icloglock, 0);
+		xlog_wait(&log->l_flush_wait, &log->l_icloglock);
 		goto restart;
 	}
 
@@ -2523,7 +2518,8 @@ xlog_grant_log_space(xlog_t *log,
 			goto error_return;
 
 		XFS_STATS_INC(xs_sleep_logspace);
-		sv_wait(&tic->t_wait, PINOD|PLTWAIT, &log->l_grant_lock, s);
+		xlog_wait(&tic->t_wait, &log->l_grant_lock);
+
 		/*
 		 * If we got an error, and the filesystem is shutting down,
 		 * we'll catch it down below. So just continue...
@@ -2552,7 +2548,7 @@ redo:
 		spin_lock(&log->l_grant_lock);
 
 		XFS_STATS_INC(xs_sleep_logspace);
-		sv_wait(&tic->t_wait, PINOD|PLTWAIT, &log->l_grant_lock, s);
+		xlog_wait(&tic->t_wait, &log->l_grant_lock);
 
 		spin_lock(&log->l_grant_lock);
 		if (XLOG_FORCED_SHUTDOWN(log))
@@ -2635,7 +2631,7 @@ xlog_regrant_write_log_space(xlog_t *log,
 			if (free_bytes < ntic->t_unit_res)
 				break;
 			free_bytes -= ntic->t_unit_res;
-			sv_signal(&ntic->t_wait);
+			wake_up(&ntic->t_wait);
 		}
 
 		if (ntic != list_first_entry(&log->l_writeq,
@@ -2650,8 +2646,7 @@ xlog_regrant_write_log_space(xlog_t *log,
 			spin_lock(&log->l_grant_lock);
 
 			XFS_STATS_INC(xs_sleep_logspace);
-			sv_wait(&tic->t_wait, PINOD|PLTWAIT,
-				&log->l_grant_lock, s);
+			xlog_wait(&tic->t_wait, &log->l_grant_lock);
 
 			/* If we're shutting down, this tic is already
 			 * off the queue */
@@ -2677,8 +2672,7 @@ redo:
 
 		XFS_STATS_INC(xs_sleep_logspace);
 		trace_xfs_log_regrant_write_sleep2(log, tic);
-
-		sv_wait(&tic->t_wait, PINOD|PLTWAIT, &log->l_grant_lock, s);
+		xlog_wait(&tic->t_wait, &log->l_grant_lock);
 
 		/* If we're shutting down, this tic is already off the queue */
 		spin_lock(&log->l_grant_lock);
@@ -3029,7 +3023,7 @@ maybe_sleep:
 			return XFS_ERROR(EIO);
 		}
 		XFS_STATS_INC(xs_log_force_sleep);
-		sv_wait(&iclog->ic_force_wait, PINOD, &log->l_icloglock, s);
+		xlog_wait(&iclog->ic_force_wait, &log->l_icloglock);
 		/*
 		 * No need to grab the log lock here since we're
 		 * only deciding whether or not to return EIO
@@ -3147,8 +3141,8 @@ try_again:
 
 			XFS_STATS_INC(xs_log_force_sleep);
 
-			sv_wait(&iclog->ic_prev->ic_write_wait,
-				PSWP, &log->l_icloglock, s);
+			xlog_wait(&iclog->ic_prev->ic_write_wait,
+						&log->l_icloglock);
 			if (log_flushed)
 				*log_flushed = 1;
 			already_slept = 1;
@@ -3176,7 +3170,7 @@ try_again:
 				return XFS_ERROR(EIO);
 			}
 			XFS_STATS_INC(xs_log_force_sleep);
-			sv_wait(&iclog->ic_force_wait, PSWP, &log->l_icloglock, s);
+			xlog_wait(&iclog->ic_force_wait, &log->l_icloglock);
 			/*
 			 * No need to grab the log lock here since we're
 			 * only deciding whether or not to return EIO
@@ -3251,10 +3245,8 @@ xfs_log_ticket_put(
 	xlog_ticket_t	*ticket)
 {
 	ASSERT(atomic_read(&ticket->t_ref) > 0);
-	if (atomic_dec_and_test(&ticket->t_ref)) {
-		sv_destroy(&ticket->t_wait);
+	if (atomic_dec_and_test(&ticket->t_ref))
 		kmem_zone_free(xfs_log_ticket_zone, ticket);
-	}
 }
 
 xlog_ticket_t *
@@ -3387,7 +3379,7 @@ xlog_ticket_alloc(
 	tic->t_trans_type	= 0;
 	if (xflags & XFS_LOG_PERM_RESERV)
 		tic->t_flags |= XLOG_TIC_PERM_RESERV;
-	sv_init(&tic->t_wait, SV_DEFAULT, "logtick");
+	init_waitqueue_head(&tic->t_wait);
 
 	xlog_tic_reset_res(tic);
 
@@ -3719,10 +3711,10 @@ xfs_log_force_umount(
 	 * action is protected by the GRANTLOCK.
 	 */
 	list_for_each_entry(tic, &log->l_reserveq, t_queue)
-		sv_signal(&tic->t_wait);
+		wake_up(&tic->t_wait);
 
 	list_for_each_entry(tic, &log->l_writeq, t_queue)
-		sv_signal(&tic->t_wait);
+		wake_up(&tic->t_wait);
 	spin_unlock(&log->l_grant_lock);
 
 	if (!(log->l_iclog->ic_state & XLOG_STATE_IOERROR)) {