Diffstat (limited to 'fs/xfs')
-rw-r--r--  fs/xfs/xfs_log.c         | 55
-rw-r--r--  fs/xfs/xfs_log_priv.h    |  9
-rw-r--r--  fs/xfs/xfs_log_recover.c |  6
3 files changed, 36 insertions(+), 34 deletions(-)
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 1e2020d5a8b6..70790eb48336 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -675,12 +675,8 @@ xfs_log_move_tail(xfs_mount_t *mp,
 	if (XLOG_FORCED_SHUTDOWN(log))
 		return;
 
-	if (tail_lsn == 0) {
-		/* needed since sync_lsn is 64 bits */
-		spin_lock(&log->l_icloglock);
-		tail_lsn = log->l_last_sync_lsn;
-		spin_unlock(&log->l_icloglock);
-	}
+	if (tail_lsn == 0)
+		tail_lsn = atomic64_read(&log->l_last_sync_lsn);
 
 	spin_lock(&log->l_grant_lock);
 
@@ -800,11 +796,9 @@ xlog_assign_tail_lsn(xfs_mount_t *mp)
 
 	tail_lsn = xfs_trans_ail_tail(mp->m_ail);
 	spin_lock(&log->l_grant_lock);
-	if (tail_lsn != 0) {
-		log->l_tail_lsn = tail_lsn;
-	} else {
-		tail_lsn = log->l_tail_lsn = log->l_last_sync_lsn;
-	}
+	if (!tail_lsn)
+		tail_lsn = atomic64_read(&log->l_last_sync_lsn);
+	log->l_tail_lsn = tail_lsn;
 	spin_unlock(&log->l_grant_lock);
 
 	return tail_lsn;
@@ -1014,9 +1008,9 @@ xlog_alloc_log(xfs_mount_t *mp,
 	log->l_flags	   |= XLOG_ACTIVE_RECOVERY;
 
 	log->l_prev_block  = -1;
-	log->l_tail_lsn	   = xlog_assign_lsn(1, 0);
 	/* log->l_tail_lsn = 0x100000000LL; cycle = 1; current block = 0 */
-	log->l_last_sync_lsn = log->l_tail_lsn;
+	log->l_tail_lsn	   = xlog_assign_lsn(1, 0);
+	atomic64_set(&log->l_last_sync_lsn, xlog_assign_lsn(1, 0));
 	log->l_curr_cycle  = 1;	    /* 0 is bad since this is initial value */
 	xlog_assign_grant_head(&log->l_grant_reserve_head, 1, 0);
 	xlog_assign_grant_head(&log->l_grant_write_head, 1, 0);
@@ -1194,6 +1188,7 @@ xlog_grant_push_ail(
 	int		need_bytes)
 {
 	xfs_lsn_t	threshold_lsn = 0;
+	xfs_lsn_t	last_sync_lsn;
 	xfs_lsn_t	tail_lsn;
 	int		free_blocks;
 	int		free_bytes;
@@ -1228,10 +1223,12 @@
 						threshold_block);
 	/*
 	 * Don't pass in an lsn greater than the lsn of the last
-	 * log record known to be on disk.
+	 * log record known to be on disk. Use a snapshot of the last sync lsn
+	 * so that it doesn't change between the compare and the set.
 	 */
-	if (XFS_LSN_CMP(threshold_lsn, log->l_last_sync_lsn) > 0)
-		threshold_lsn = log->l_last_sync_lsn;
+	last_sync_lsn = atomic64_read(&log->l_last_sync_lsn);
+	if (XFS_LSN_CMP(threshold_lsn, last_sync_lsn) > 0)
+		threshold_lsn = last_sync_lsn;
 
 	/*
 	 * Get the transaction layer to kick the dirty buffers out to
@@ -2194,7 +2191,7 @@ xlog_state_do_callback(
 			lowest_lsn = xlog_get_lowest_lsn(log);
 			if (lowest_lsn &&
 			    XFS_LSN_CMP(lowest_lsn,
-					be64_to_cpu(iclog->ic_header.h_lsn)) < 0) {
+					be64_to_cpu(iclog->ic_header.h_lsn)) < 0) {
 				iclog = iclog->ic_next;
 				continue; /* Leave this iclog for
 					   * another thread */
@@ -2202,23 +2199,21 @@
 
 			iclog->ic_state = XLOG_STATE_CALLBACK;
 
-			spin_unlock(&log->l_icloglock);
 
-			/* l_last_sync_lsn field protected by
-			 * l_grant_lock. Don't worry about iclog's lsn.
-			 * No one else can be here except us.
+			/*
+			 * update the last_sync_lsn before we drop the
+			 * icloglock to ensure we are the only one that
+			 * can update it.
 			 */
-			spin_lock(&log->l_grant_lock);
-			ASSERT(XFS_LSN_CMP(log->l_last_sync_lsn,
-			       be64_to_cpu(iclog->ic_header.h_lsn)) <= 0);
-			log->l_last_sync_lsn =
-				be64_to_cpu(iclog->ic_header.h_lsn);
-			spin_unlock(&log->l_grant_lock);
+			ASSERT(XFS_LSN_CMP(atomic64_read(&log->l_last_sync_lsn),
+				be64_to_cpu(iclog->ic_header.h_lsn)) <= 0);
+			atomic64_set(&log->l_last_sync_lsn,
+				be64_to_cpu(iclog->ic_header.h_lsn));
 
-		} else {
-			spin_unlock(&log->l_icloglock);
+		} else
 			ioerrors++;
-		}
+
+		spin_unlock(&log->l_icloglock);
 
 		/*
 		 * Keep processing entries in the callback list until
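Note on the xlog_grant_push_ail() hunk above: the shared 64-bit LSN is read once into a local, and only that snapshot is compared and assigned, so the value cannot change between the compare and the set. A minimal userspace sketch of the same pattern, assuming C11 stdatomic in place of the kernel's atomic64_t API and a plain integer compare standing in for XFS_LSN_CMP():

#include <stdatomic.h>
#include <stdint.h>

typedef int64_t lsn_t;

static _Atomic int64_t last_sync_lsn;

static lsn_t clamp_to_last_sync(lsn_t threshold_lsn)
{
	/* read the shared value once; compare and assign only the
	 * snapshot, so it cannot move between the compare and the set */
	lsn_t snapshot = atomic_load(&last_sync_lsn);

	if (threshold_lsn > snapshot)
		threshold_lsn = snapshot;
	return threshold_lsn;
}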
diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h
index e2bb276eb2a7..958f356df10e 100644
--- a/fs/xfs/xfs_log_priv.h
+++ b/fs/xfs/xfs_log_priv.h
@@ -507,7 +507,6 @@ typedef struct log {
 	spinlock_t		l_icloglock;	/* grab to change iclog state */
 	xfs_lsn_t		l_tail_lsn;	/* lsn of 1st LR with unflushed
 						 * buffers */
-	xfs_lsn_t		l_last_sync_lsn;/* lsn of last LR on disk */
 	int			l_curr_cycle;	/* Cycle number of log writes */
 	int			l_prev_cycle;	/* Cycle number before last
 						 * block increment */
@@ -521,6 +520,14 @@ typedef struct log {
 	int64_t			l_grant_reserve_head;
 	int64_t			l_grant_write_head;
 
+	/*
+	 * l_last_sync_lsn is an atomic so it can be set and read without
+	 * needing to hold specific locks. To avoid operations contending with
+	 * other hot objects, place it on a separate cacheline.
+	 */
+	/* lsn of last LR on disk */
+	atomic64_t		l_last_sync_lsn ____cacheline_aligned_in_smp;
+
 	/* The following field are used for debugging; need to hold icloglock */
 #ifdef DEBUG
 	char			*l_iclog_bak[XLOG_MAX_ICLOGS];
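The new struct placement above pads l_last_sync_lsn onto its own cacheline so frequent atomic updates to it do not keep invalidating the line that holds the grant heads (false sharing). A hedged userspace sketch of the same layout idea; the 64-byte alignment and the field names are illustrative stand-ins for ____cacheline_aligned_in_smp and the real struct log:

#include <stdalign.h>
#include <stdatomic.h>
#include <stdint.h>

struct log_sketch {
	int64_t			grant_reserve_head;
	int64_t			grant_write_head;

	/* own cacheline: writers of last_sync_lsn no longer dirty the
	 * line that readers of the grant heads are hitting */
	alignas(64) _Atomic int64_t last_sync_lsn;
};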
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index 1550404a8aeb..18e1e18d7147 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -937,7 +937,7 @@ xlog_find_tail(
 	if (found == 2)
 		log->l_curr_cycle++;
 	log->l_tail_lsn = be64_to_cpu(rhead->h_tail_lsn);
-	log->l_last_sync_lsn = be64_to_cpu(rhead->h_lsn);
+	atomic64_set(&log->l_last_sync_lsn, be64_to_cpu(rhead->h_lsn));
 	xlog_assign_grant_head(&log->l_grant_reserve_head, log->l_curr_cycle,
 					BBTOB(log->l_curr_block));
 	xlog_assign_grant_head(&log->l_grant_write_head, log->l_curr_cycle,
@@ -989,9 +989,9 @@ xlog_find_tail(
 			log->l_tail_lsn =
 				xlog_assign_lsn(log->l_curr_cycle,
 						after_umount_blk);
-			log->l_last_sync_lsn =
-				xlog_assign_lsn(log->l_curr_cycle,
-						after_umount_blk);
+			atomic64_set(&log->l_last_sync_lsn,
+					xlog_assign_lsn(log->l_curr_cycle,
+							after_umount_blk));
 			*tail_blk = after_umount_blk;
 
 			/*
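For reference on the initial-value comment in the xlog_alloc_log() hunk ("log->l_tail_lsn = 0x100000000LL; cycle = 1; current block = 0"): an LSN packs a 32-bit cycle count in the high word and a 32-bit block number in the low word, which is why cycle 1, block 0 encodes as 0x100000000. A small standalone sketch of that packing, assuming xlog_assign_lsn() is the usual shift-or helper:

#include <stdint.h>
#include <stdio.h>

/* pack cycle:block into one 64-bit LSN, as xlog_assign_lsn() does */
static uint64_t assign_lsn(uint32_t cycle, uint32_t block)
{
	return ((uint64_t)cycle << 32) | block;
}

int main(void)
{
	/* cycle 1, block 0 -> 0x100000000, matching the diff's comment */
	printf("0x%llx\n", (unsigned long long)assign_lsn(1, 0));
	return 0;
}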