author		Dave Chinner <david@fromorbit.com>	2015-04-16 08:13:18 -0400
committer	Dave Chinner <david@fromorbit.com>	2015-04-16 08:13:18 -0400
commit		542c311813d5cb2e6f0dfa9557f41c829b8fb6a0 (patch)
tree		573c5644eb966e44112016c9ae86e80251326223 /fs/xfs/xfs_file.c
parent		6a63ef064b2444883ce8b68b0779d0c739d27204 (diff)
parent		0cefb29e6a63727bc7606c47fc538467594ef112 (diff)
Merge branch 'xfs-dio-extend-fix' into for-next
Conflicts:
	fs/xfs/xfs_file.c
Diffstat (limited to 'fs/xfs/xfs_file.c')
-rw-r--r--	fs/xfs/xfs_file.c	46
1 file changed, 42 insertions(+), 4 deletions(-)
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index c203839cd5be..3a5d305e60c9 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -569,20 +569,41 @@ restart:
 	 * write.  If zeroing is needed and we are currently holding the
 	 * iolock shared, we need to update it to exclusive which implies
 	 * having to redo all checks before.
+	 *
+	 * We need to serialise against EOF updates that occur in IO
+	 * completions here. We want to make sure that nobody is changing the
+	 * size while we do this check until we have placed an IO barrier (i.e.
+	 * hold the XFS_IOLOCK_EXCL) that prevents new IO from being dispatched.
+	 * The spinlock effectively forms a memory barrier once we have the
+	 * XFS_IOLOCK_EXCL so we are guaranteed to see the latest EOF value
+	 * and hence be able to correctly determine if we need to run zeroing.
 	 */
+	spin_lock(&ip->i_flags_lock);
 	if (*pos > i_size_read(inode)) {
 		bool	zero = false;
 
+		spin_unlock(&ip->i_flags_lock);
 		if (*iolock == XFS_IOLOCK_SHARED) {
 			xfs_rw_iunlock(ip, *iolock);
 			*iolock = XFS_IOLOCK_EXCL;
 			xfs_rw_ilock(ip, *iolock);
+
+			/*
+			 * We now have an IO submission barrier in place, but
+			 * AIO can do EOF updates during IO completion and hence
+			 * we now need to wait for all of them to drain. Non-AIO
+			 * DIO will have drained before we are given the
+			 * XFS_IOLOCK_EXCL, and so for most cases this wait is a
+			 * no-op.
+			 */
+			inode_dio_wait(inode);
 			goto restart;
 		}
 		error = xfs_zero_eof(ip, *pos, i_size_read(inode), &zero);
 		if (error)
 			return error;
-	}
+	} else
+		spin_unlock(&ip->i_flags_lock);
 
 	/*
 	 * Updating the timestamps will grab the ilock again from
@@ -644,6 +665,8 @@ xfs_file_dio_aio_write(
 	int		iolock;
 	size_t		count = iov_iter_count(from);
 	loff_t		pos = iocb->ki_pos;
+	loff_t		end;
+	struct iov_iter	data;
 	struct xfs_buftarg	*target = XFS_IS_REALTIME_INODE(ip) ?
 					mp->m_rtdev_targp : mp->m_ddev_targp;
 
@@ -683,10 +706,11 @@ xfs_file_dio_aio_write(
 	if (ret)
 		goto out;
 	iov_iter_truncate(from, count);
+	end = pos + count - 1;
 
 	if (mapping->nrpages) {
 		ret = filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
-						   pos, pos + count - 1);
+						   pos, end);
 		if (ret)
 			goto out;
 		/*
@@ -696,7 +720,7 @@ xfs_file_dio_aio_write(
 		 */
 		ret = invalidate_inode_pages2_range(VFS_I(ip)->i_mapping,
 					pos >> PAGE_CACHE_SHIFT,
-					(pos + count - 1) >> PAGE_CACHE_SHIFT);
+					end >> PAGE_CACHE_SHIFT);
 		WARN_ON_ONCE(ret);
 		ret = 0;
 	}
@@ -713,8 +737,22 @@ xfs_file_dio_aio_write(
 	}
 
 	trace_xfs_file_direct_write(ip, count, iocb->ki_pos, 0);
-	ret = generic_file_direct_write(iocb, from, pos);
 
+	data = *from;
+	ret = mapping->a_ops->direct_IO(WRITE, iocb, &data, pos);
+
+	/* see generic_file_direct_write() for why this is necessary */
+	if (mapping->nrpages) {
+		invalidate_inode_pages2_range(mapping,
+					      pos >> PAGE_CACHE_SHIFT,
+					      end >> PAGE_CACHE_SHIFT);
+	}
+
+	if (ret > 0) {
+		pos += ret;
+		iov_iter_advance(from, ret);
+		iocb->ki_pos = pos;
+	}
out:
 	xfs_rw_iunlock(ip, iolock);
 
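
Note on the first hunk: the new code is a check-then-upgrade pattern. EOF is sampled under ip->i_flags_lock (the same lock IO completions take when they update the size), so the check cannot race with a completion; if zeroing turns out to be needed while the iolock is only held shared, the lock is upgraded to exclusive, inode_dio_wait() drains any AIO still in flight, and every check is redone from restart:. The sketch below models that ordering in userspace with POSIX locks; the type and names (sketch_inode, write_checks, flags_lock) are hypothetical stand-ins for the kernel objects, not kernel API.

	#include <pthread.h>
	#include <stdbool.h>

	/* Hypothetical stand-in for the inode state involved in the hunk. */
	struct sketch_inode {
		pthread_spinlock_t flags_lock; /* models ip->i_flags_lock      */
		pthread_rwlock_t   iolock;     /* models the XFS iolock        */
		long long          size;       /* EOF; moved by IO completions */
	};

	/*
	 * Models the hunk's control flow: sample EOF under the spinlock so a
	 * racing completion cannot move it mid-check; if zeroing is needed
	 * while the iolock is only held shared, upgrade to exclusive and redo
	 * all checks from the top. Caller holds the iolock; *excl says how.
	 */
	int write_checks(struct sketch_inode *ip, long long pos, bool *excl)
	{
	restart:
		pthread_spin_lock(&ip->flags_lock);
		if (pos > ip->size) {
			pthread_spin_unlock(&ip->flags_lock);
			if (!*excl) {
				pthread_rwlock_unlock(&ip->iolock); /* drop shared    */
				pthread_rwlock_wrlock(&ip->iolock); /* take exclusive */
				*excl = true;
				/* the kernel calls inode_dio_wait() here, so AIO
				 * completions can no longer move EOF under us */
				goto restart;
			}
			/* holding exclusive: safe to zero [size, pos) here */
			return 0;
		}
		pthread_spin_unlock(&ip->flags_lock);
		return 0;
	}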
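
Note on the last hunk: xfs_file_dio_aio_write() now open-codes the tail of generic_file_direct_write(), invoking ->direct_IO() on a copy (data = *from) of the caller's iov_iter. The copy matters because the callee may consume its iterator even when the write is short or fails; the caller then advances the original iterator only by the bytes actually written (iov_iter_advance(from, ret)). Below is a minimal runnable sketch of that contract, using a toy iterator and a hypothetical short-writing callee in place of struct iov_iter and ->direct_IO():

	#include <stdio.h>
	#include <stddef.h>

	/* Toy stand-ins for struct iov_iter and a ->direct_IO() that may
	 * complete only part of the request. */
	struct iter { const char *buf; size_t len; };

	static long short_direct_io(struct iter *it)
	{
		long written = (long)(it->len / 2); /* device accepted half      */
		it->buf += it->len;                 /* callee may trash its copy */
		it->len  = 0;
		return written;
	}

	int main(void)
	{
		struct iter from = { "payload-bytes", 13 };
		struct iter data = from;            /* mirrors "data = *from"    */

		long ret = short_direct_io(&data);  /* callee consumes the copy  */
		if (ret > 0) {                      /* mirrors iov_iter_advance  */
			from.buf += ret;
			from.len -= (size_t)ret;
		}
		printf("wrote %ld, %zu bytes left in caller's iter\n", ret, from.len);
		return 0;
	}

Because only the copy is handed to the callee, a short or failed write leaves the caller's iterator positioned exactly at the first unwritten byte, which is what allows the buffered-IO fallback path to retry the remainder.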