path: root/fs/ext4/inode.c
Diffstat (limited to 'fs/ext4/inode.c')
-rw-r--r--  fs/ext4/inode.c  83
1 file changed, 0 insertions(+), 83 deletions(-)
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 7a83c2793956..9e60d0b8fa75 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -3626,89 +3626,6 @@ static int ext4_get_block_write(struct inode *inode, sector_t iblock,
 				   EXT4_GET_BLOCKS_IO_CREATE_EXT);
 }
 
-static void dump_completed_IO(struct inode * inode)
-{
-#ifdef EXT4_DEBUG
-	struct list_head *cur, *before, *after;
-	ext4_io_end_t *io, *io0, *io1;
-	unsigned long flags;
-
-	if (list_empty(&EXT4_I(inode)->i_completed_io_list)){
-		ext4_debug("inode %lu completed_io list is empty\n", inode->i_ino);
-		return;
-	}
-
-	ext4_debug("Dump inode %lu completed_io list \n", inode->i_ino);
-	spin_lock_irqsave(&EXT4_I(inode)->i_completed_io_lock, flags);
-	list_for_each_entry(io, &EXT4_I(inode)->i_completed_io_list, list){
-		cur = &io->list;
-		before = cur->prev;
-		io0 = container_of(before, ext4_io_end_t, list);
-		after = cur->next;
-		io1 = container_of(after, ext4_io_end_t, list);
-
-		ext4_debug("io 0x%p from inode %lu,prev 0x%p,next 0x%p\n",
-			    io, inode->i_ino, io0, io1);
-	}
-	spin_unlock_irqrestore(&EXT4_I(inode)->i_completed_io_lock, flags);
-#endif
-}
-
-/*
- * This function is called from ext4_sync_file().
- *
- * When IO is completed, the work to convert unwritten extents to
- * written is queued on workqueue but may not get immediately
- * scheduled. When fsync is called, we need to ensure the
- * conversion is complete before fsync returns.
- * The inode keeps track of a list of pending/completed IO that
- * might needs to do the conversion. This function walks through
- * the list and convert the related unwritten extents for completed IO
- * to written.
- * The function return the number of pending IOs on success.
- */
-int flush_completed_IO(struct inode *inode)
-{
-	ext4_io_end_t *io;
-	struct ext4_inode_info *ei = EXT4_I(inode);
-	unsigned long flags;
-	int ret = 0;
-	int ret2 = 0;
-
-	if (list_empty(&ei->i_completed_io_list))
-		return ret;
-
-	dump_completed_IO(inode);
-	spin_lock_irqsave(&ei->i_completed_io_lock, flags);
-	while (!list_empty(&ei->i_completed_io_list)){
-		io = list_entry(ei->i_completed_io_list.next,
-				ext4_io_end_t, list);
-		/*
-		 * Calling ext4_end_io_nolock() to convert completed
-		 * IO to written.
-		 *
-		 * When ext4_sync_file() is called, run_queue() may already
-		 * about to flush the work corresponding to this io structure.
-		 * It will be upset if it founds the io structure related
-		 * to the work-to-be schedule is freed.
-		 *
-		 * Thus we need to keep the io structure still valid here after
-		 * convertion finished. The io structure has a flag to
-		 * avoid double converting from both fsync and background work
-		 * queue work.
-		 */
-		spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
-		ret = ext4_end_io_nolock(io);
-		spin_lock_irqsave(&ei->i_completed_io_lock, flags);
-		if (ret < 0)
-			ret2 = ret;
-		else
-			list_del_init(&io->list);
-	}
-	spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
-	return (ret2 < 0) ? ret2 : 0;
-}
-
 static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
 			    ssize_t size, void *private, int ret,
 			    bool is_async)
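
The removed flush_completed_IO() shows a common pattern: a spinlock protects the completed-IO list, but the per-entry conversion may sleep, so the lock is released around each conversion and an entry is only unlinked once its conversion has succeeded, keeping the structure valid for any concurrent workqueue worker. The following is a minimal userspace sketch of that pattern only, not kernel code; the names (io_end, convert_one, flush_completed) and the pthread mutex standing in for the irq-safe spinlock are illustrative assumptions, and unlike the original it stops at the first conversion error instead of re-scanning the list.

/*
 * Userspace sketch (assumption: not kernel code) of the drop-lock-while-
 * converting pattern used by the removed flush_completed_IO().
 */
#include <pthread.h>
#include <stdio.h>

struct io_end {
	struct io_end *prev, *next;	/* circular doubly linked list */
	int id;
};

static struct io_end completed = { &completed, &completed, -1 };   /* list head */
static pthread_mutex_t completed_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for ext4_end_io_nolock(); in the kernel this call may sleep. */
static int convert_one(struct io_end *io)
{
	printf("converting io_end %d\n", io->id);
	return 0;			/* a negative return would mean "not converted" */
}

static int flush_completed(void)
{
	int ret2 = 0;

	pthread_mutex_lock(&completed_lock);
	while (completed.next != &completed) {
		struct io_end *io = completed.next;
		int ret;

		/*
		 * Drop the lock: the conversion may block, and the entry is
		 * left on the list so anyone else holding a pointer to it
		 * still sees a valid structure.
		 */
		pthread_mutex_unlock(&completed_lock);
		ret = convert_one(io);
		pthread_mutex_lock(&completed_lock);

		if (ret < 0) {
			ret2 = ret;	/* remember the error ... */
			break;		/* ... and stop; the kernel code kept scanning */
		}
		io->prev->next = io->next;	/* unlink only after success */
		io->next->prev = io->prev;
		io->prev = io->next = io;
	}
	pthread_mutex_unlock(&completed_lock);
	return ret2;
}

int main(void)
{
	struct io_end a = { &completed, &completed, 1 };

	/* queue one completed entry, then flush it as fsync would */
	completed.next = completed.prev = &a;
	return flush_completed();
}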