| author | David Chinner <david@fromorbit.com> | 2008-10-30 02:06:18 -0400 |
|---|---|---|
| committer | Lachlan McIlroy <lachlan@sgi.com> | 2008-10-30 02:06:18 -0400 |
| commit | a167b17e899a930758506bbc18748078d6fd8c89 (patch) | |
| tree | 698f8efbe5085ae75e0b46e1b71c7bfc7186d3b2 /fs/xfs/linux-2.6/xfs_sync.c | |
| parent | fe4fa4b8e463fa5848ef9e86ed75d27501d0da1e (diff) | |
[XFS] move xfssyncd code to xfs_sync.c
Move all the xfssyncd code to the new xfs_sync.c file. This places it
closer to the actual code that it interacts with, rather than just being
associated with high level VFS code.
SGI-PV: 988139
SGI-Modid: xfs-linux-melb:xfs-kern:32283a
Signed-off-by: David Chinner <david@fromorbit.com>
Signed-off-by: Lachlan McIlroy <lachlan@sgi.com>
Signed-off-by: Christoph Hellwig <hch@infradead.org>
Diffstat (limited to 'fs/xfs/linux-2.6/xfs_sync.c')
-rw-r--r--   fs/xfs/linux-2.6/xfs_sync.c | 163
1 file changed, 163 insertions, 0 deletions
diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c
index c765eb2a8dc..a51534c71b3 100644
--- a/fs/xfs/linux-2.6/xfs_sync.c
+++ b/fs/xfs/linux-2.6/xfs_sync.c
@@ -44,6 +44,9 @@
 #include "xfs_inode_item.h"
 #include "xfs_rw.h"
 
+#include <linux/kthread.h>
+#include <linux/freezer.h>
+
 /*
  * xfs_sync flushes any pending I/O to file system vfsp.
  *
@@ -603,3 +606,163 @@ xfs_syncsub(
 
 	return XFS_ERROR(last_error);
 }
+
+/*
+ * Enqueue a work item to be picked up by the vfs xfssyncd thread.
+ * Doing this has two advantages:
+ * - It saves on stack space, which is tight in certain situations
+ * - It can be used (with care) as a mechanism to avoid deadlocks.
+ * Flushing while allocating in a full filesystem requires both.
+ */
+STATIC void
+xfs_syncd_queue_work(
+	struct xfs_mount *mp,
+	void		*data,
+	void		(*syncer)(struct xfs_mount *, void *))
+{
+	struct bhv_vfs_sync_work *work;
+
+	work = kmem_alloc(sizeof(struct bhv_vfs_sync_work), KM_SLEEP);
+	INIT_LIST_HEAD(&work->w_list);
+	work->w_syncer = syncer;
+	work->w_data = data;
+	work->w_mount = mp;
+	spin_lock(&mp->m_sync_lock);
+	list_add_tail(&work->w_list, &mp->m_sync_list);
+	spin_unlock(&mp->m_sync_lock);
+	wake_up_process(mp->m_sync_task);
+}
+
+/*
+ * Flush delayed allocate data, attempting to free up reserved space
+ * from existing allocations. At this point a new allocation attempt
+ * has failed with ENOSPC and we are in the process of scratching our
+ * heads, looking about for more room...
+ */
+STATIC void
+xfs_flush_inode_work(
+	struct xfs_mount *mp,
+	void		*arg)
+{
+	struct inode	*inode = arg;
+	filemap_flush(inode->i_mapping);
+	iput(inode);
+}
+
+void
+xfs_flush_inode(
+	xfs_inode_t	*ip)
+{
+	struct inode	*inode = VFS_I(ip);
+
+	igrab(inode);
+	xfs_syncd_queue_work(ip->i_mount, inode, xfs_flush_inode_work);
+	delay(msecs_to_jiffies(500));
+}
+
+/*
+ * This is the "bigger hammer" version of xfs_flush_inode_work...
+ * (IOW, "If at first you don't succeed, use a Bigger Hammer").
+ */
+STATIC void
+xfs_flush_device_work(
+	struct xfs_mount *mp,
+	void		*arg)
+{
+	struct inode	*inode = arg;
+	sync_blockdev(mp->m_super->s_bdev);
+	iput(inode);
+}
+
+void
+xfs_flush_device(
+	xfs_inode_t	*ip)
+{
+	struct inode	*inode = VFS_I(ip);
+
+	igrab(inode);
+	xfs_syncd_queue_work(ip->i_mount, inode, xfs_flush_device_work);
+	delay(msecs_to_jiffies(500));
+	xfs_log_force(ip->i_mount, (xfs_lsn_t)0, XFS_LOG_FORCE|XFS_LOG_SYNC);
+}
+
+STATIC void
+xfs_sync_worker(
+	struct xfs_mount *mp,
+	void		*unused)
+{
+	int		error;
+
+	if (!(mp->m_flags & XFS_MOUNT_RDONLY))
+		error = xfs_sync(mp, SYNC_FSDATA | SYNC_BDFLUSH | SYNC_ATTR);
+	mp->m_sync_seq++;
+	wake_up(&mp->m_wait_single_sync_task);
+}
+
+STATIC int
+xfssyncd(
+	void			*arg)
+{
+	struct xfs_mount	*mp = arg;
+	long			timeleft;
+	bhv_vfs_sync_work_t	*work, *n;
+	LIST_HEAD		(tmp);
+
+	set_freezable();
+	timeleft = xfs_syncd_centisecs * msecs_to_jiffies(10);
+	for (;;) {
+		timeleft = schedule_timeout_interruptible(timeleft);
+		/* swsusp */
+		try_to_freeze();
+		if (kthread_should_stop() && list_empty(&mp->m_sync_list))
+			break;
+
+		spin_lock(&mp->m_sync_lock);
+		/*
+		 * We can get woken by laptop mode, to do a sync -
+		 * that's the (only!) case where the list would be
+		 * empty with time remaining.
+		 */
+		if (!timeleft || list_empty(&mp->m_sync_list)) {
+			if (!timeleft)
+				timeleft = xfs_syncd_centisecs *
+						msecs_to_jiffies(10);
+			INIT_LIST_HEAD(&mp->m_sync_work.w_list);
+			list_add_tail(&mp->m_sync_work.w_list,
+					&mp->m_sync_list);
+		}
+		list_for_each_entry_safe(work, n, &mp->m_sync_list, w_list)
+			list_move(&work->w_list, &tmp);
+		spin_unlock(&mp->m_sync_lock);
+
+		list_for_each_entry_safe(work, n, &tmp, w_list) {
+			(*work->w_syncer)(mp, work->w_data);
+			list_del(&work->w_list);
+			if (work == &mp->m_sync_work)
+				continue;
+			kmem_free(work);
+		}
+	}
+
+	return 0;
+}
+
+int
+xfs_syncd_init(
+	struct xfs_mount	*mp)
+{
+	mp->m_sync_work.w_syncer = xfs_sync_worker;
+	mp->m_sync_work.w_mount = mp;
+	mp->m_sync_task = kthread_run(xfssyncd, mp, "xfssyncd");
+	if (IS_ERR(mp->m_sync_task))
+		return -PTR_ERR(mp->m_sync_task);
+	return 0;
+}
+
+void
+xfs_syncd_stop(
+	struct xfs_mount	*mp)
+{
+	kthread_stop(mp->m_sync_task);
+}
+
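For readers skimming the moved code, its core is a simple work-deferral pattern: callers package a callback plus argument into a list item, append it to mp->m_sync_list under m_sync_lock, and wake the xfssyncd kthread, which drains the list and runs each callback off its own stack. The sketch below is a minimal userspace analogue of that pattern using pthreads; the names (syncd_queue_work, print_work, etc.) and the mutex/condvar primitives are illustrative stand-ins, not the XFS or kernel APIs.

```c
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct sync_work {
	void			(*syncer)(void *);	/* deferred callback */
	void			*data;			/* callback argument */
	struct sync_work	*next;
};

static struct sync_work	*sync_list;	/* pending work; LIFO here for brevity */
static pthread_mutex_t	sync_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t	sync_wait = PTHREAD_COND_INITIALIZER;
static int		stopping;

/* Analogue of xfs_syncd_queue_work(): queue a callback and wake the worker. */
static void
syncd_queue_work(void (*syncer)(void *), void *data)
{
	struct sync_work *work = malloc(sizeof(*work));

	work->syncer = syncer;
	work->data = data;
	pthread_mutex_lock(&sync_lock);
	work->next = sync_list;
	sync_list = work;
	pthread_cond_signal(&sync_wait);
	pthread_mutex_unlock(&sync_lock);
}

/* Analogue of xfssyncd(): drain the list and run each deferred callback. */
static void *
syncd(void *arg)
{
	(void)arg;
	for (;;) {
		struct sync_work *batch;

		pthread_mutex_lock(&sync_lock);
		while (!sync_list && !stopping)
			pthread_cond_wait(&sync_wait, &sync_lock);
		batch = sync_list;
		sync_list = NULL;
		pthread_mutex_unlock(&sync_lock);

		while (batch) {
			struct sync_work *work = batch;

			batch = work->next;
			work->syncer(work->data);
			free(work);
		}
		if (stopping)
			return NULL;
	}
}

static void
print_work(void *data)
{
	printf("deferred work: %s\n", (const char *)data);
}

int
main(void)
{
	pthread_t task;

	pthread_create(&task, NULL, syncd, NULL);	/* cf. xfs_syncd_init() */
	syncd_queue_work(print_work, "flush inode");
	syncd_queue_work(print_work, "flush device");

	pthread_mutex_lock(&sync_lock);
	stopping = 1;					/* cf. xfs_syncd_stop() */
	pthread_cond_signal(&sync_wait);
	pthread_mutex_unlock(&sync_lock);
	pthread_join(task, NULL);
	return 0;
}
```

As in the kernel code, the key property is that the expensive flush runs on the dedicated thread's stack rather than the caller's, and the caller only takes a short critical section to enqueue and wake.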