-rw-r--r--   fs/xfs/linux-2.6/xfs_trace.h |  2
-rw-r--r--   fs/xfs/xfs_log.c             | 56
-rw-r--r--   fs/xfs/xfs_log_priv.h        | 37
-rw-r--r--   fs/xfs/xfs_log_recover.c     | 14
4 files changed, 63 insertions(+), 46 deletions(-)
diff --git a/fs/xfs/linux-2.6/xfs_trace.h b/fs/xfs/linux-2.6/xfs_trace.h
index 3ff6b35f920..b180e1bf825 100644
--- a/fs/xfs/linux-2.6/xfs_trace.h
+++ b/fs/xfs/linux-2.6/xfs_trace.h
@@ -794,7 +794,7 @@ DECLARE_EVENT_CLASS(xfs_loggrant_class,
 				  &__entry->grant_write_bytes);
 		__entry->curr_cycle = log->l_curr_cycle;
 		__entry->curr_block = log->l_curr_block;
-		__entry->tail_lsn = log->l_tail_lsn;
+		__entry->tail_lsn = atomic64_read(&log->l_tail_lsn);
 	),
 	TP_printk("dev %d:%d type %s t_ocnt %u t_cnt %u t_curr_res %u "
 		  "t_unit_res %u t_flags %s reserveq %s "
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 70790eb4833..d118bf80448 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -678,15 +678,11 @@ xfs_log_move_tail(xfs_mount_t *mp,
 	if (tail_lsn == 0)
 		tail_lsn = atomic64_read(&log->l_last_sync_lsn);
 
-	spin_lock(&log->l_grant_lock);
-
-	/* Also an invalid lsn.  1 implies that we aren't passing in a valid
-	 * tail_lsn.
-	 */
-	if (tail_lsn != 1) {
-		log->l_tail_lsn = tail_lsn;
-	}
+	/* tail_lsn == 1 implies that we weren't passed a valid value. */
+	if (tail_lsn != 1)
+		atomic64_set(&log->l_tail_lsn, tail_lsn);
 
+	spin_lock(&log->l_grant_lock);
 	if (!list_empty(&log->l_writeq)) {
 #ifdef DEBUG
 		if (log->l_flags & XLOG_ACTIVE_RECOVERY)
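Note that the tail update no longer happens under l_grant_lock: a single 64-bit atomic store publishes it before the lock is taken to process the wait queues. A minimal userspace sketch of that ordering, with C11 atomics and a pthread mutex standing in for atomic64_set() and l_grant_lock (the names and the empty queue-walk are illustrative, not kernel code):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdint.h>

    static _Atomic int64_t l_tail_lsn;
    static pthread_mutex_t l_grant_lock = PTHREAD_MUTEX_INITIALIZER;

    void move_tail(int64_t tail_lsn)
    {
            /* tail_lsn == 1 is the "no valid value" sentinel, as above */
            if (tail_lsn != 1)
                    atomic_store(&l_tail_lsn, tail_lsn); /* lock-free publish */

            pthread_mutex_lock(&l_grant_lock);
            /* ... wake queued log reservations against the new tail ... */
            pthread_mutex_unlock(&l_grant_lock);
    }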
@@ -789,21 +785,19 @@ xfs_log_need_covered(xfs_mount_t *mp)
  * We may be holding the log iclog lock upon entering this routine.
  */
 xfs_lsn_t
-xlog_assign_tail_lsn(xfs_mount_t *mp)
+xlog_assign_tail_lsn(
+	struct xfs_mount	*mp)
 {
-	xfs_lsn_t tail_lsn;
-	xlog_t	  *log = mp->m_log;
+	xfs_lsn_t		tail_lsn;
+	struct log		*log = mp->m_log;
 
 	tail_lsn = xfs_trans_ail_tail(mp->m_ail);
-	spin_lock(&log->l_grant_lock);
 	if (!tail_lsn)
 		tail_lsn = atomic64_read(&log->l_last_sync_lsn);
-	log->l_tail_lsn = tail_lsn;
-	spin_unlock(&log->l_grant_lock);
 
+	atomic64_set(&log->l_tail_lsn, tail_lsn);
 	return tail_lsn;
-} /* xlog_assign_tail_lsn */
-
+}
 
 /*
  * Return the space in the log between the tail and the head. The head
@@ -831,8 +825,8 @@ xlog_space_left(
 	int		head_bytes;
 
 	xlog_crack_grant_head(head, &head_cycle, &head_bytes);
-	tail_bytes = BBTOB(BLOCK_LSN(log->l_tail_lsn));
-	tail_cycle = CYCLE_LSN(log->l_tail_lsn);
+	xlog_crack_atomic_lsn(&log->l_tail_lsn, &tail_cycle, &tail_bytes);
+	tail_bytes = BBTOB(tail_bytes);
 	if (tail_cycle == head_cycle && head_bytes >= tail_bytes)
 		free_bytes = log->l_logsize - (head_bytes - tail_bytes);
 	else if (tail_cycle + 1 < head_cycle)
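The cracked (tail_cycle, tail_bytes) pair feeds the same circular-log arithmetic as before: on the same cycle everything outside [tail, head) is free, and with the head one cycle ahead only [head, tail) is. A standalone sketch with made-up byte counts (the overrun case xlog_space_left() also handles is collapsed to -1 here):

    #include <stdio.h>

    static int space_left(int logsize, int tail_cycle, int tail_bytes,
                          int head_cycle, int head_bytes)
    {
            if (tail_cycle == head_cycle && head_bytes >= tail_bytes)
                    return logsize - (head_bytes - tail_bytes);
            else if (tail_cycle + 1 == head_cycle && head_bytes <= tail_bytes)
                    return tail_bytes - head_bytes;
            return -1;      /* head has overrun the tail: corruption */
    }

    int main(void)
    {
            /* same cycle: everything except [tail, head) is free */
            printf("%d\n", space_left(1000, 1, 200, 1, 700)); /* 500 */
            /* head wrapped one cycle ahead: only [head, tail) is free */
            printf("%d\n", space_left(1000, 1, 700, 2, 200)); /* 500 */
            return 0;
    }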
@@ -1009,8 +1003,8 @@ xlog_alloc_log(xfs_mount_t *mp,
 
 	log->l_prev_block = -1;
 	/* log->l_tail_lsn = 0x100000000LL; cycle = 1; current block = 0 */
-	log->l_tail_lsn = xlog_assign_lsn(1, 0);
-	atomic64_set(&log->l_last_sync_lsn, xlog_assign_lsn(1, 0));
+	xlog_assign_atomic_lsn(&log->l_tail_lsn, 1, 0);
+	xlog_assign_atomic_lsn(&log->l_last_sync_lsn, 1, 0);
 	log->l_curr_cycle = 1;	    /* 0 is bad since this is initial value */
 	xlog_assign_grant_head(&log->l_grant_reserve_head, 1, 0);
 	xlog_assign_grant_head(&log->l_grant_write_head, 1, 0);
@@ -1189,7 +1183,6 @@ xlog_grant_push_ail(
 {
 	xfs_lsn_t	threshold_lsn = 0;
 	xfs_lsn_t	last_sync_lsn;
-	xfs_lsn_t	tail_lsn;
 	int		free_blocks;
 	int		free_bytes;
 	int		threshold_block;
@@ -1198,7 +1191,6 @@ xlog_grant_push_ail(
 
 	ASSERT(BTOBB(need_bytes) < log->l_logBBsize);
 
-	tail_lsn = log->l_tail_lsn;
 	free_bytes = xlog_space_left(log, &log->l_grant_reserve_head);
 	free_blocks = BTOBBT(free_bytes);
 
@@ -1213,8 +1205,9 @@ xlog_grant_push_ail(
 	if (free_blocks >= free_threshold)
 		return;
 
-	threshold_block = BLOCK_LSN(tail_lsn) + free_threshold;
-	threshold_cycle = CYCLE_LSN(tail_lsn);
+	xlog_crack_atomic_lsn(&log->l_tail_lsn, &threshold_cycle,
+						&threshold_block);
+	threshold_block += free_threshold;
 	if (threshold_block >= log->l_logBBsize) {
 		threshold_block -= log->l_logBBsize;
 		threshold_cycle += 1;
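Since free_threshold is now added to the cracked tail block directly, the wrap past the end of the log has to be carried into the cycle by hand, exactly as the hunk does. A worked example (the log size in basic blocks is an illustrative value):

    #include <assert.h>

    int main(void)
    {
            int logBBsize = 2048;              /* log size in basic blocks */
            int threshold_cycle = 3, threshold_block = 2000;
            int free_threshold = 256;

            threshold_block += free_threshold; /* 2256: past the end */
            if (threshold_block >= logBBsize) {
                    threshold_block -= logBBsize; /* wraps to 208 */
                    threshold_cycle += 1;         /* into the next cycle */
            }
            assert(threshold_cycle == 4 && threshold_block == 208);
            return 0;
    }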
@@ -2828,11 +2821,11 @@ xlog_state_release_iclog(
 
 	if (iclog->ic_state == XLOG_STATE_WANT_SYNC) {
 		/* update tail before writing to iclog */
-		xlog_assign_tail_lsn(log->l_mp);
+		xfs_lsn_t tail_lsn = xlog_assign_tail_lsn(log->l_mp);
 		sync++;
 		iclog->ic_state = XLOG_STATE_SYNCING;
-		iclog->ic_header.h_tail_lsn = cpu_to_be64(log->l_tail_lsn);
-		xlog_verify_tail_lsn(log, iclog, log->l_tail_lsn);
+		iclog->ic_header.h_tail_lsn = cpu_to_be64(tail_lsn);
+		xlog_verify_tail_lsn(log, iclog, tail_lsn);
 		/* cycle incremented when incrementing curr_block */
 	}
 	spin_unlock(&log->l_icloglock);
@@ -3435,7 +3428,7 @@ STATIC void
 xlog_verify_grant_tail(
 	struct log	*log)
 {
-	xfs_lsn_t	tail_lsn = log->l_tail_lsn;
+	int		tail_cycle, tail_blocks;
 	int		cycle, space;
 
 	/*
@@ -3445,9 +3438,10 @@ xlog_verify_grant_tail(
 	 * check the byte count.
 	 */
 	xlog_crack_grant_head(&log->l_grant_write_head, &cycle, &space);
-	if (CYCLE_LSN(tail_lsn) != cycle) {
-		ASSERT(cycle - 1 == CYCLE_LSN(tail_lsn));
-		ASSERT(space <= BBTOB(BLOCK_LSN(tail_lsn)));
+	xlog_crack_atomic_lsn(&log->l_tail_lsn, &tail_cycle, &tail_blocks);
+	if (tail_cycle != cycle) {
+		ASSERT(cycle - 1 == tail_cycle);
+		ASSERT(space <= BBTOB(tail_blocks));
 	}
 }
 
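Restated, the invariant being asserted: the write grant head may lead the tail by at most one cycle, and when it is one cycle ahead its byte offset must not have caught up with the tail's. A standalone restatement of the check (BBTOB converts basic blocks to bytes, a shift by 9; the sample values are illustrative):

    #include <assert.h>

    #define BBTOB(bb)       ((bb) << 9)

    static void verify_grant_tail(int cycle, int space,
                                  int tail_cycle, int tail_blocks)
    {
            if (tail_cycle != cycle) {
                    assert(cycle - 1 == tail_cycle);
                    assert(space <= BBTOB(tail_blocks));
            }
    }

    int main(void)
    {
            /* head one cycle ahead of the tail, byte count still behind it */
            verify_grant_tail(2, BBTOB(100), 1, 300);
            return 0;
    }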
diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h
index 958f356df10..d34af1c21ed 100644
--- a/fs/xfs/xfs_log_priv.h
+++ b/fs/xfs/xfs_log_priv.h
@@ -53,7 +53,6 @@ struct xfs_mount;
 	BTOBB(XLOG_MAX_ICLOGS << (xfs_sb_version_haslogv2(&log->l_mp->m_sb) ? \
 	 XLOG_MAX_RECORD_BSHIFT : XLOG_BIG_RECORD_BSHIFT))
 
-
 static inline xfs_lsn_t xlog_assign_lsn(uint cycle, uint block)
 {
 	return ((xfs_lsn_t)cycle << 32) | block;
@@ -505,8 +504,6 @@ typedef struct log {
 					 * log entries" */
 	xlog_in_core_t		*l_iclog;	/* head log queue	*/
 	spinlock_t		l_icloglock;	/* grab to change iclog state */
-	xfs_lsn_t		l_tail_lsn;	/* lsn of 1st LR with unflushed
-						 * buffers */
 	int			l_curr_cycle;	/* Cycle number of log writes */
 	int			l_prev_cycle;	/* Cycle number before last
 						 * block increment */
@@ -521,12 +518,15 @@ typedef struct log {
 	int64_t			l_grant_write_head;
 
 	/*
-	 * l_last_sync_lsn is an atomic so it can be set and read without
-	 * needing to hold specific locks. To avoid operations contending with
-	 * other hot objects, place it on a separate cacheline.
+	 * l_last_sync_lsn and l_tail_lsn are atomics so they can be set and
+	 * read without needing to hold specific locks. To avoid operations
+	 * contending with other hot objects, place each of them on a separate
+	 * cacheline.
 	 */
 	/* lsn of last LR on disk */
 	atomic64_t		l_last_sync_lsn ____cacheline_aligned_in_smp;
+	/* lsn of 1st LR with unflushed buffers */
+	atomic64_t		l_tail_lsn ____cacheline_aligned_in_smp;
 
 	/* The following fields are used for debugging; need to hold icloglock */
 #ifdef DEBUG
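____cacheline_aligned_in_smp pads each LSN out to its own cacheline, so CPUs hammering one of them do not keep invalidating the line holding the other. A userspace equivalent of the layout, assuming a 64-byte line:

    #include <assert.h>
    #include <stdatomic.h>
    #include <stdint.h>

    #define CACHELINE 64    /* assumed L1 line size */

    struct hot_lsns {
            _Atomic int64_t last_sync_lsn __attribute__((aligned(CACHELINE)));
            _Atomic int64_t tail_lsn      __attribute__((aligned(CACHELINE)));
    };

    int main(void)
    {
            /* each field starts a fresh line, so the struct spans two */
            assert(sizeof(struct hot_lsns) == 2 * CACHELINE);
            return 0;
    }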
@@ -566,6 +566,31 @@ int xlog_write(struct log *log, struct xfs_log_vec *log_vector,
 			 xlog_in_core_t **commit_iclog, uint flags);
 
 /*
+ * When we crack an atomic LSN, we sample it first so that the value will not
+ * change while we are cracking it into the component values. This means we
+ * will always get consistent component values to work from. This should always
+ * be used to sample and crack LSNs that are stored and updated in atomic
+ * variables.
+ */
+static inline void
+xlog_crack_atomic_lsn(atomic64_t *lsn, uint *cycle, uint *block)
+{
+	xfs_lsn_t val = atomic64_read(lsn);
+
+	*cycle = CYCLE_LSN(val);
+	*block = BLOCK_LSN(val);
+}
+
+/*
+ * Calculate and assign a value to an atomic LSN variable from component pieces.
+ */
+static inline void
+xlog_assign_atomic_lsn(atomic64_t *lsn, uint cycle, uint block)
+{
+	atomic64_set(lsn, xlog_assign_lsn(cycle, block));
+}
+
+/*
  * When we crack the grant head, we sample it first so that the value will not
  * change while we are cracking it into the component values. This means we
  * will always get consistent component values to work from.
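The reason for sampling before cracking: two separate reads of cycle and block could straddle a concurrent update and pair a new cycle with an old block, whereas the helpers read the atomic exactly once. A self-contained C11 sketch of the same discipline (the CYCLE_LSN/BLOCK_LSN split mirrors XFS; everything else is illustrative):

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef int64_t xfs_lsn_t;

    #define CYCLE_LSN(lsn)  ((uint32_t)((lsn) >> 32))
    #define BLOCK_LSN(lsn)  ((uint32_t)((lsn) & 0xffffffff))

    static void crack_lsn(_Atomic int64_t *lsn, uint32_t *cycle, uint32_t *block)
    {
            xfs_lsn_t val = atomic_load(lsn); /* one sample, consistent halves */

            *cycle = CYCLE_LSN(val);
            *block = BLOCK_LSN(val);
    }

    static void assign_lsn(_Atomic int64_t *lsn, uint32_t cycle, uint32_t block)
    {
            atomic_store(lsn, ((xfs_lsn_t)cycle << 32) | block);
    }

    int main(void)
    {
            _Atomic int64_t tail_lsn;
            uint32_t cycle, block;

            assign_lsn(&tail_lsn, 1, 0);      /* cycle 1, block 0 */
            crack_lsn(&tail_lsn, &cycle, &block);
            printf("cycle %u block %u\n", cycle, block);
            return 0;
    }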
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index 18e1e18d714..204d8e5fa7f 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -936,7 +936,7 @@ xlog_find_tail(
 	log->l_curr_cycle = be32_to_cpu(rhead->h_cycle);
 	if (found == 2)
 		log->l_curr_cycle++;
-	log->l_tail_lsn = be64_to_cpu(rhead->h_tail_lsn);
+	atomic64_set(&log->l_tail_lsn, be64_to_cpu(rhead->h_tail_lsn));
 	atomic64_set(&log->l_last_sync_lsn, be64_to_cpu(rhead->h_lsn));
 	xlog_assign_grant_head(&log->l_grant_reserve_head, log->l_curr_cycle,
 					BBTOB(log->l_curr_block));
@@ -971,7 +971,7 @@ xlog_find_tail(
 	}
 	after_umount_blk = (i + hblks + (int)
 		BTOBB(be32_to_cpu(rhead->h_len))) % log->l_logBBsize;
-	tail_lsn = log->l_tail_lsn;
+	tail_lsn = atomic64_read(&log->l_tail_lsn);
 	if (*head_blk == after_umount_blk &&
 	    be32_to_cpu(rhead->h_num_logops) == 1) {
 		umount_data_blk = (i + hblks) % log->l_logBBsize;
@@ -986,12 +986,10 @@ xlog_find_tail(
 			 * log records will point recovery to after the
 			 * current unmount record.
 			 */
-			log->l_tail_lsn =
-				xlog_assign_lsn(log->l_curr_cycle,
-						after_umount_blk);
-			atomic64_set(&log->l_last_sync_lsn,
-				xlog_assign_lsn(log->l_curr_cycle,
-						after_umount_blk));
+			xlog_assign_atomic_lsn(&log->l_tail_lsn,
+					log->l_curr_cycle, after_umount_blk);
+			xlog_assign_atomic_lsn(&log->l_last_sync_lsn,
+					log->l_curr_cycle, after_umount_blk);
 			*tail_blk = after_umount_blk;
 
 			/*