Diffstat (limited to 'fs/xfs/xfs_log.c')
-rw-r--r--	fs/xfs/xfs_log.c	56
1 file changed, 25 insertions(+), 31 deletions(-)
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 70790eb48336..d118bf804480 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -678,15 +678,11 @@ xfs_log_move_tail(xfs_mount_t *mp,
 	if (tail_lsn == 0)
 		tail_lsn = atomic64_read(&log->l_last_sync_lsn);
 
-	spin_lock(&log->l_grant_lock);
-
-	/* Also an invalid lsn. 1 implies that we aren't passing in a valid
-	 * tail_lsn.
-	 */
-	if (tail_lsn != 1) {
-		log->l_tail_lsn = tail_lsn;
-	}
+	/* tail_lsn == 1 implies that we weren't passed a valid value. */
+	if (tail_lsn != 1)
+		atomic64_set(&log->l_tail_lsn, tail_lsn);
 
+	spin_lock(&log->l_grant_lock);
 	if (!list_empty(&log->l_writeq)) {
 #ifdef DEBUG
 		if (log->l_flags & XLOG_ACTIVE_RECOVERY)
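
The hunk above drops the l_grant_lock protection around the tail update and replaces the plain assignment with atomic64_set(). A single 64-bit atomic store is enough because an xfs_lsn_t packs the cycle number into the upper 32 bits and the block number into the lower 32 bits, so readers always see a matching cycle/block pair. The sketch below is illustrative only, assuming the usual CYCLE_LSN/BLOCK_LSN/xlog_assign_lsn definitions from the XFS log headers; it is not code taken from this patch:

/* Illustrative sketch of the packed LSN layout this patch relies on. */
#define CYCLE_LSN(lsn)	((uint)((lsn) >> 32))	/* upper 32 bits: cycle */
#define BLOCK_LSN(lsn)	((uint)(lsn))		/* lower 32 bits: block */

static inline xfs_lsn_t
xlog_assign_lsn(uint cycle, uint block)
{
	return ((xfs_lsn_t)cycle << 32) | block;
}

/*
 * Because the whole LSN fits in one 64-bit word, atomic64_set() and
 * atomic64_read() hand out torn-free snapshots without l_grant_lock.
 */
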
@@ -789,21 +785,19 @@ xfs_log_need_covered(xfs_mount_t *mp)
  * We may be holding the log iclog lock upon entering this routine.
  */
 xfs_lsn_t
-xlog_assign_tail_lsn(xfs_mount_t *mp)
+xlog_assign_tail_lsn(
+	struct xfs_mount	*mp)
 {
 	xfs_lsn_t	tail_lsn;
-	xlog_t		*log = mp->m_log;
+	struct log	*log = mp->m_log;
 
 	tail_lsn = xfs_trans_ail_tail(mp->m_ail);
-	spin_lock(&log->l_grant_lock);
 	if (!tail_lsn)
 		tail_lsn = atomic64_read(&log->l_last_sync_lsn);
-	log->l_tail_lsn = tail_lsn;
-	spin_unlock(&log->l_grant_lock);
 
+	atomic64_set(&log->l_tail_lsn, tail_lsn);
 	return tail_lsn;
-}	/* xlog_assign_tail_lsn */
-
+}
 
 /*
  * Return the space in the log between the tail and the head.  The head
@@ -831,8 +825,8 @@ xlog_space_left(
 	int		head_bytes;
 
 	xlog_crack_grant_head(head, &head_cycle, &head_bytes);
-	tail_bytes = BBTOB(BLOCK_LSN(log->l_tail_lsn));
-	tail_cycle = CYCLE_LSN(log->l_tail_lsn);
+	xlog_crack_atomic_lsn(&log->l_tail_lsn, &tail_cycle, &tail_bytes);
+	tail_bytes = BBTOB(tail_bytes);
 	if (tail_cycle == head_cycle && head_bytes >= tail_bytes)
 		free_bytes = log->l_logsize - (head_bytes - tail_bytes);
 	else if (tail_cycle + 1 < head_cycle)
@@ -1009,8 +1003,8 @@ xlog_alloc_log(xfs_mount_t *mp,
 
 	log->l_prev_block = -1;
 	/* log->l_tail_lsn = 0x100000000LL; cycle = 1; current block = 0 */
-	log->l_tail_lsn = xlog_assign_lsn(1, 0);
-	atomic64_set(&log->l_last_sync_lsn, xlog_assign_lsn(1, 0));
+	xlog_assign_atomic_lsn(&log->l_tail_lsn, 1, 0);
+	xlog_assign_atomic_lsn(&log->l_last_sync_lsn, 1, 0);
 	log->l_curr_cycle = 1;	    /* 0 is bad since this is initial value */
 	xlog_assign_grant_head(&log->l_grant_reserve_head, 1, 0);
 	xlog_assign_grant_head(&log->l_grant_write_head, 1, 0);
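
xlog_assign_atomic_lsn() and xlog_crack_atomic_lsn(), used in the hunks above, are not defined in xfs_log.c; they presumably live in fs/xfs/xfs_log_priv.h with the other grant and LSN helpers introduced by this series. A minimal sketch of what such wrappers would look like, assuming they simply pair atomic64_read()/atomic64_set() with the packing helpers shown earlier:

/* Sketch only: atomic accessors for the packed LSN format. */
static inline void
xlog_assign_atomic_lsn(atomic64_t *lsn, uint cycle, uint block)
{
	atomic64_set(lsn, xlog_assign_lsn(cycle, block));
}

static inline void
xlog_crack_atomic_lsn(atomic64_t *lsn, uint *cycle, uint *block)
{
	xfs_lsn_t val = atomic64_read(lsn);

	*cycle = CYCLE_LSN(val);
	*block = BLOCK_LSN(val);
}
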
@@ -1189,7 +1183,6 @@ xlog_grant_push_ail(
 {
 	xfs_lsn_t	threshold_lsn = 0;
 	xfs_lsn_t	last_sync_lsn;
-	xfs_lsn_t	tail_lsn;
 	int		free_blocks;
 	int		free_bytes;
 	int		threshold_block;
@@ -1198,7 +1191,6 @@ xlog_grant_push_ail(
 
 	ASSERT(BTOBB(need_bytes) < log->l_logBBsize);
 
-	tail_lsn = log->l_tail_lsn;
 	free_bytes = xlog_space_left(log, &log->l_grant_reserve_head);
 	free_blocks = BTOBBT(free_bytes);
 
@@ -1213,8 +1205,9 @@ xlog_grant_push_ail(
 	if (free_blocks >= free_threshold)
 		return;
 
-	threshold_block = BLOCK_LSN(tail_lsn) + free_threshold;
-	threshold_cycle = CYCLE_LSN(tail_lsn);
+	xlog_crack_atomic_lsn(&log->l_tail_lsn, &threshold_cycle,
+						&threshold_block);
+	threshold_block += free_threshold;
 	if (threshold_block >= log->l_logBBsize) {
 		threshold_block -= log->l_logBBsize;
 		threshold_cycle += 1;
@@ -2828,11 +2821,11 @@ xlog_state_release_iclog(
 
 	if (iclog->ic_state == XLOG_STATE_WANT_SYNC) {
 		/* update tail before writing to iclog */
-		xlog_assign_tail_lsn(log->l_mp);
+		xfs_lsn_t tail_lsn = xlog_assign_tail_lsn(log->l_mp);
 		sync++;
 		iclog->ic_state = XLOG_STATE_SYNCING;
-		iclog->ic_header.h_tail_lsn = cpu_to_be64(log->l_tail_lsn);
-		xlog_verify_tail_lsn(log, iclog, log->l_tail_lsn);
+		iclog->ic_header.h_tail_lsn = cpu_to_be64(tail_lsn);
+		xlog_verify_tail_lsn(log, iclog, tail_lsn);
 		/* cycle incremented when incrementing curr_block */
 	}
 	spin_unlock(&log->l_icloglock);
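
Note how the xlog_state_release_iclog() hunk keeps the value returned by xlog_assign_tail_lsn() instead of re-reading log->l_tail_lsn. Once the tail is updated outside l_grant_lock, two separate reads may observe different tails if another CPU moves it in between; using the returned snapshot keeps the iclog header and xlog_verify_tail_lsn() consistent with each other. The helper below is hypothetical, shown only to spell out the read-once pattern:

/*
 * Hypothetical illustration: each atomic64_read() returns one coherent
 * 64-bit LSN, but nothing ties two successive reads together, so code
 * that needs a single tail value samples it once and reuses the local.
 */
static inline xfs_lsn_t
xlog_sample_tail_lsn(struct log *log)
{
	return atomic64_read(&log->l_tail_lsn);
}
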
@@ -3435,7 +3428,7 @@ STATIC void
 xlog_verify_grant_tail(
 	struct log	*log)
 {
-	xfs_lsn_t	tail_lsn = log->l_tail_lsn;
+	int		tail_cycle, tail_blocks;
 	int		cycle, space;
 
 	/*
@@ -3445,9 +3438,10 @@ xlog_verify_grant_tail(
 	 * check the byte count.
 	 */
 	xlog_crack_grant_head(&log->l_grant_write_head, &cycle, &space);
-	if (CYCLE_LSN(tail_lsn) != cycle) {
-		ASSERT(cycle - 1 == CYCLE_LSN(tail_lsn));
-		ASSERT(space <= BBTOB(BLOCK_LSN(tail_lsn)));
+	xlog_crack_atomic_lsn(&log->l_tail_lsn, &tail_cycle, &tail_blocks);
+	if (tail_cycle != cycle) {
+		ASSERT(cycle - 1 == tail_cycle);
+		ASSERT(space <= BBTOB(tail_blocks));
 	}
 }
 