path: root/fs/xfs/xfs_log.c
author		Dave Chinner <dchinner@redhat.com>	2012-10-08 06:56:02 -0400
committer	Ben Myers <bpm@sgi.com>			2012-10-17 12:53:29 -0400
commit		f661f1e0bf5002bdcc8b5810ad0a184a1841537f (patch)
tree		fe6a0e5f14e3b2c0c6e01dcddbf6de1b8b3de26e /fs/xfs/xfs_log.c
parent		7f7bebefba152c5bdfe961cd2e97e8695a32998c (diff)
xfs: sync work is now only periodic log work
The only thing the periodic sync work does now is flush the AIL and idle the
log. These are really functions of the log code, so move the work to
xfs_log.c and rename it appropriately.

The only wart that this leaves behind is the xfssyncd_centisecs sysctl;
otherwise the xfssyncd is dead. Clean up any comments that relate to
xfssyncd to reflect its passing.

Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Mark Tinguely <tinguely@sgi.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Ben Myers <bpm@sgi.com>
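For readers unfamiliar with the mechanism being moved, the hunks below boil
down to a delayed work item that requeues itself every sync period and is
cancelled at unmount. The following is a minimal, self-contained sketch of
that self-requeueing delayed-work pattern; it is not part of this patch, and
every name in it (demo_wq, demo_work, demo_worker, demo_queue,
DEMO_PERIOD_MS) is hypothetical.

/*
 * Hypothetical demo module: a delayed work item that reschedules itself,
 * in the same style as xfs_log_worker()/xfs_log_work_queue() below.
 */
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

#define DEMO_PERIOD_MS	30000		/* 30s, like the xfssyncd default */

static struct workqueue_struct	*demo_wq;
static struct delayed_work	demo_work;

static void demo_queue(void)
{
	queue_delayed_work(demo_wq, &demo_work,
			   msecs_to_jiffies(DEMO_PERIOD_MS));
}

static void demo_worker(struct work_struct *work)
{
	/*
	 * Per-instance state would be recovered here with
	 * to_delayed_work()/container_of(), as xfs_log_worker() does.
	 */
	pr_info("periodic work ran\n");

	/* queue us up again */
	demo_queue();
}

static int __init demo_init(void)
{
	demo_wq = alloc_workqueue("demo", 0, 0);
	if (!demo_wq)
		return -ENOMEM;
	INIT_DELAYED_WORK(&demo_work, demo_worker);
	demo_queue();
	return 0;
}

static void __exit demo_exit(void)
{
	/* stop the self-requeueing loop, like xfs_log_unmount() */
	cancel_delayed_work_sync(&demo_work);
	destroy_workqueue(demo_wq);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");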
Diffstat (limited to 'fs/xfs/xfs_log.c')
-rw-r--r--	fs/xfs/xfs_log.c	61
1 file changed, 51 insertions, 10 deletions
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 7f4f9370d0e7..efea12bfbd6b 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -34,6 +34,7 @@
 #include "xfs_dinode.h"
 #include "xfs_inode.h"
 #include "xfs_trace.h"
+#include "xfs_fsops.h"
 
 kmem_zone_t *xfs_log_ticket_zone;
 
@@ -679,25 +680,29 @@ out:
 }
 
 /*
- * Finish the recovery of the file system. This is separate from
- * the xfs_log_mount() call, because it depends on the code in
- * xfs_mountfs() to read in the root and real-time bitmap inodes
- * between calling xfs_log_mount() and here.
+ * Finish the recovery of the file system. This is separate from the
+ * xfs_log_mount() call, because it depends on the code in xfs_mountfs() to read
+ * in the root and real-time bitmap inodes between calling xfs_log_mount() and
+ * here.
  *
- * mp	- ubiquitous xfs mount point structure
+ * If we finish recovery successfully, start the background log work. If we are
+ * not doing recovery, then we have a RO filesystem and we don't need to start
+ * it.
  */
 int
 xfs_log_mount_finish(xfs_mount_t *mp)
 {
-	int	error;
+	int	error = 0;
 
-	if (!(mp->m_flags & XFS_MOUNT_NORECOVERY))
+	if (!(mp->m_flags & XFS_MOUNT_NORECOVERY)) {
 		error = xlog_recover_finish(mp->m_log);
-	else {
-		error = 0;
+		if (!error)
+			xfs_log_work_queue(mp);
+	} else {
 		ASSERT(mp->m_flags & XFS_MOUNT_RDONLY);
 	}
 
+
 	return error;
 }
 
@@ -858,7 +863,7 @@ xfs_log_unmount_write(xfs_mount_t *mp)
 void
 xfs_log_unmount(xfs_mount_t *mp)
 {
-	cancel_delayed_work_sync(&mp->m_sync_work);
+	cancel_delayed_work_sync(&mp->m_log->l_work);
 	xfs_trans_ail_destroy(mp);
 	xlog_dealloc_log(mp->m_log);
 }
@@ -1161,6 +1166,40 @@ done:
 }	/* xlog_get_iclog_buffer_size */
 
 
+void
+xfs_log_work_queue(
+	struct xfs_mount	*mp)
+{
+	queue_delayed_work(xfs_syncd_wq, &mp->m_log->l_work,
+				msecs_to_jiffies(xfs_syncd_centisecs * 10));
+}
+
+/*
+ * Every sync period we need to unpin all items in the AIL and push them to
+ * disk. If there is nothing dirty, then we might need to cover the log to
+ * indicate that the filesystem is idle.
+ */
+void
+xfs_log_worker(
+	struct work_struct	*work)
+{
+	struct xlog		*log = container_of(to_delayed_work(work),
+						struct xlog, l_work);
+	struct xfs_mount	*mp = log->l_mp;
+
+	/* dgc: errors ignored - not fatal and nowhere to report them */
+	if (xfs_log_need_covered(mp))
+		xfs_fs_log_dummy(mp);
+	else
+		xfs_log_force(mp, 0);
+
+	/* start pushing all the metadata that is currently dirty */
+	xfs_ail_push_all(mp->m_ail);
+
+	/* queue us up again */
+	xfs_log_work_queue(mp);
+}
+
 /*
  * This routine initializes some of the log structure for a given mount point.
  * Its primary purpose is to fill in enough, so recovery can occur. However,
@@ -1195,6 +1234,7 @@ xlog_alloc_log(
 	log->l_logBBsize   = num_bblks;
 	log->l_covered_state = XLOG_STATE_COVER_IDLE;
 	log->l_flags	   |= XLOG_ACTIVE_RECOVERY;
+	INIT_DELAYED_WORK(&log->l_work, xfs_log_worker);
 
 	log->l_prev_block  = -1;
 	/* log->l_tail_lsn = 0x100000000LL; cycle = 1; current block = 0 */
@@ -3700,3 +3740,4 @@ xlog_iclogs_empty(
 	} while (iclog != log->l_iclog);
 	return 1;
 }
+
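As a usage note, the period of the new log work is still driven by the
xfssyncd_centisecs sysctl that this patch leaves behind: xfs_log_work_queue()
multiplies it by 10 to get milliseconds and converts that to jiffies, so the
long-standing default of 3000 centiseconds gives a 30 second period. The
small user-space sketch below only illustrates that arithmetic; DEMO_HZ and
demo_msecs_to_jiffies() are made-up stand-ins for the kernel's HZ and
msecs_to_jiffies(), not part of this patch.

#include <stdio.h>

#define DEMO_HZ	250	/* example tick rate; real kernels vary (100..1000) */

static unsigned long demo_msecs_to_jiffies(unsigned int ms)
{
	/* round up so a non-zero delay never becomes zero jiffies */
	return ((unsigned long)ms * DEMO_HZ + 999) / 1000;
}

int main(void)
{
	unsigned int xfs_syncd_centisecs = 3000;	/* default: 30 seconds */
	unsigned int ms = xfs_syncd_centisecs * 10;

	printf("log work period: %u ms = %lu jiffies at HZ=%d\n",
	       ms, demo_msecs_to_jiffies(ms), DEMO_HZ);
	return 0;
}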