Diffstat (limited to 'fs/xfs')
-rw-r--r-- fs/xfs/xfs_aops.c  | 31
-rw-r--r-- fs/xfs/xfs_aops.h  |  1
-rw-r--r-- fs/xfs/xfs_file.c  | 79
-rw-r--r-- fs/xfs/xfs_iomap.c | 22
4 files changed, 53 insertions, 80 deletions
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index 7575cfc3ad15..4a28fa91e3b1 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -200,7 +200,7 @@ xfs_setfilesize_trans_alloc(
  * Update on-disk file size now that data has been written to disk.
  */
 STATIC int
-xfs_setfilesize(
+__xfs_setfilesize(
 	struct xfs_inode	*ip,
 	struct xfs_trans	*tp,
 	xfs_off_t		offset,
@@ -225,6 +225,23 @@ xfs_setfilesize(
 	return xfs_trans_commit(tp);
 }
 
+int
+xfs_setfilesize(
+	struct xfs_inode	*ip,
+	xfs_off_t		offset,
+	size_t			size)
+{
+	struct xfs_mount	*mp = ip->i_mount;
+	struct xfs_trans	*tp;
+	int			error;
+
+	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp);
+	if (error)
+		return error;
+
+	return __xfs_setfilesize(ip, tp, offset, size);
+}
+
 STATIC int
 xfs_setfilesize_ioend(
 	struct xfs_ioend	*ioend,
@@ -247,7 +264,7 @@ xfs_setfilesize_ioend(
 		return error;
 	}
 
-	return xfs_setfilesize(ip, tp, ioend->io_offset, ioend->io_size);
+	return __xfs_setfilesize(ip, tp, ioend->io_offset, ioend->io_size);
 }
 
 /*
@@ -1336,13 +1353,12 @@ xfs_end_io_direct_write(
 {
 	struct inode		*inode = file_inode(iocb->ki_filp);
 	struct xfs_inode	*ip = XFS_I(inode);
-	struct xfs_mount	*mp = ip->i_mount;
 	uintptr_t		flags = (uintptr_t)private;
 	int			error = 0;
 
 	trace_xfs_end_io_direct_write(ip, offset, size);
 
-	if (XFS_FORCED_SHUTDOWN(mp))
+	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
 		return -EIO;
 
 	if (size <= 0)
@@ -1380,14 +1396,9 @@ xfs_end_io_direct_write(
 
 		error = xfs_iomap_write_unwritten(ip, offset, size);
 	} else if (flags & XFS_DIO_FLAG_APPEND) {
-		struct xfs_trans *tp;
-
 		trace_xfs_end_io_direct_write_append(ip, offset, size);
 
-		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0,
-				&tp);
-		if (!error)
-			error = xfs_setfilesize(ip, tp, offset, size);
+		error = xfs_setfilesize(ip, offset, size);
 	}
 
 	return error;
diff --git a/fs/xfs/xfs_aops.h b/fs/xfs/xfs_aops.h
index bf2d9a141a73..1950e3bca2ac 100644
--- a/fs/xfs/xfs_aops.h
+++ b/fs/xfs/xfs_aops.h
@@ -62,6 +62,7 @@ int xfs_get_blocks_dax_fault(struct inode *inode, sector_t offset,
 
 int	xfs_end_io_direct_write(struct kiocb *iocb, loff_t offset,
 		ssize_t size, void *private);
+int	xfs_setfilesize(struct xfs_inode *ip, xfs_off_t offset, size_t size);
 
 extern void xfs_count_page_state(struct page *, int *, int *);
 extern struct block_device *xfs_find_bdev_for_inode(struct inode *);
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index b927ea9abe33..c68517b0f248 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -333,10 +333,7 @@ xfs_file_dax_read(
 	struct kiocb		*iocb,
 	struct iov_iter		*to)
 {
-	struct address_space	*mapping = iocb->ki_filp->f_mapping;
-	struct inode		*inode = mapping->host;
-	struct xfs_inode	*ip = XFS_I(inode);
-	struct iov_iter		data = *to;
+	struct xfs_inode	*ip = XFS_I(iocb->ki_filp->f_mapping->host);
 	size_t			count = iov_iter_count(to);
 	ssize_t			ret = 0;
 
@@ -346,11 +343,7 @@ xfs_file_dax_read(
 		return 0; /* skip atime */
 
 	xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);
-	ret = dax_do_io(iocb, inode, &data, xfs_get_blocks_direct, NULL, 0);
-	if (ret > 0) {
-		iocb->ki_pos += ret;
-		iov_iter_advance(to, ret);
-	}
+	ret = iomap_dax_rw(iocb, to, &xfs_iomap_ops);
 	xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
 
 	file_accessed(iocb->ki_filp);
@@ -712,70 +705,32 @@ xfs_file_dax_write(
 	struct kiocb		*iocb,
 	struct iov_iter		*from)
 {
-	struct address_space	*mapping = iocb->ki_filp->f_mapping;
-	struct inode		*inode = mapping->host;
+	struct inode		*inode = iocb->ki_filp->f_mapping->host;
 	struct xfs_inode	*ip = XFS_I(inode);
-	struct xfs_mount	*mp = ip->i_mount;
-	ssize_t			ret = 0;
-	int			unaligned_io = 0;
-	int			iolock;
-	struct iov_iter		data;
+	int			iolock = XFS_IOLOCK_EXCL;
+	ssize_t			ret, error = 0;
+	size_t			count;
+	loff_t			pos;
 
-	/* "unaligned" here means not aligned to a filesystem block */
-	if ((iocb->ki_pos & mp->m_blockmask) ||
-	    ((iocb->ki_pos + iov_iter_count(from)) & mp->m_blockmask)) {
-		unaligned_io = 1;
-		iolock = XFS_IOLOCK_EXCL;
-	} else if (mapping->nrpages) {
-		iolock = XFS_IOLOCK_EXCL;
-	} else {
-		iolock = XFS_IOLOCK_SHARED;
-	}
 	xfs_rw_ilock(ip, iolock);
-
 	ret = xfs_file_aio_write_checks(iocb, from, &iolock);
 	if (ret)
 		goto out;
 
-	/*
-	 * Yes, even DAX files can have page cache attached to them: A zeroed
-	 * page is inserted into the pagecache when we have to serve a write
-	 * fault on a hole. It should never be dirtied and can simply be
-	 * dropped from the pagecache once we get real data for the page.
-	 *
-	 * XXX: This is racy against mmap, and there's nothing we can do about
-	 * it. dax_do_io() should really do this invalidation internally as
-	 * it will know if we've allocated over a hole for this specific IO and
-	 * if so it needs to update the mapping tree and invalidate existing
-	 * PTEs over the newly allocated range. Remove this invalidation when
-	 * dax_do_io() is fixed up.
-	 */
-	if (mapping->nrpages) {
-		loff_t end = iocb->ki_pos + iov_iter_count(from) - 1;
+	pos = iocb->ki_pos;
+	count = iov_iter_count(from);
 
-		ret = invalidate_inode_pages2_range(mapping,
-				iocb->ki_pos >> PAGE_SHIFT,
-				end >> PAGE_SHIFT);
-		WARN_ON_ONCE(ret);
-	}
+	trace_xfs_file_dax_write(ip, count, pos);
 
-	if (iolock == XFS_IOLOCK_EXCL && !unaligned_io) {
-		xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL);
-		iolock = XFS_IOLOCK_SHARED;
+	ret = iomap_dax_rw(iocb, from, &xfs_iomap_ops);
+	if (ret > 0 && iocb->ki_pos > i_size_read(inode)) {
+		i_size_write(inode, iocb->ki_pos);
+		error = xfs_setfilesize(ip, pos, ret);
 	}
 
-	trace_xfs_file_dax_write(ip, iov_iter_count(from), iocb->ki_pos);
-
-	data = *from;
-	ret = dax_do_io(iocb, inode, &data, xfs_get_blocks_direct,
-			xfs_end_io_direct_write, 0);
-	if (ret > 0) {
-		iocb->ki_pos += ret;
-		iov_iter_advance(from, ret);
-	}
 out:
 	xfs_rw_iunlock(ip, iolock);
-	return ret;
+	return error ? error : ret;
 }
 
 STATIC ssize_t
@@ -1514,7 +1469,7 @@ xfs_filemap_page_mkwrite(
 	xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
 
 	if (IS_DAX(inode)) {
-		ret = dax_mkwrite(vma, vmf, xfs_get_blocks_dax_fault);
+		ret = iomap_dax_fault(vma, vmf, &xfs_iomap_ops);
 	} else {
 		ret = iomap_page_mkwrite(vma, vmf, &xfs_iomap_ops);
 		ret = block_page_mkwrite_return(ret);
@@ -1548,7 +1503,7 @@ xfs_filemap_fault(
 		 * changes to xfs_get_blocks_direct() to map unwritten extent
 		 * ioend for conversion on read-only mappings.
 		 */
-		ret = dax_fault(vma, vmf, xfs_get_blocks_dax_fault);
+		ret = iomap_dax_fault(vma, vmf, &xfs_iomap_ops);
 	} else
 		ret = filemap_fault(vma, vmf);
 	xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index f96c8ffce5f4..c08253e11545 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -934,11 +934,13 @@ error_on_bmapi_transaction:
 	return error;
 }
 
-static inline bool imap_needs_alloc(struct xfs_bmbt_irec *imap, int nimaps)
+static inline bool imap_needs_alloc(struct inode *inode,
+		struct xfs_bmbt_irec *imap, int nimaps)
 {
 	return !nimaps ||
 		imap->br_startblock == HOLESTARTBLOCK ||
-		imap->br_startblock == DELAYSTARTBLOCK;
+		imap->br_startblock == DELAYSTARTBLOCK ||
+		(IS_DAX(inode) && ISUNWRITTEN(imap));
 }
 
 static int
@@ -954,16 +956,18 @@ xfs_file_iomap_begin(
 	struct xfs_bmbt_irec	imap;
 	xfs_fileoff_t		offset_fsb, end_fsb;
 	int			nimaps = 1, error = 0;
+	unsigned		lockmode;
 
 	if (XFS_FORCED_SHUTDOWN(mp))
 		return -EIO;
 
-	if ((flags & IOMAP_WRITE) && !xfs_get_extsz_hint(ip)) {
+	if ((flags & IOMAP_WRITE) &&
+	    !IS_DAX(inode) && !xfs_get_extsz_hint(ip)) {
 		return xfs_file_iomap_begin_delay(inode, offset, length, flags,
 				iomap);
 	}
 
-	xfs_ilock(ip, XFS_ILOCK_EXCL);
+	lockmode = xfs_ilock_data_map_shared(ip);
 
 	ASSERT(offset <= mp->m_super->s_maxbytes);
 	if ((xfs_fsize_t)offset + length > mp->m_super->s_maxbytes)
@@ -974,11 +978,11 @@ xfs_file_iomap_begin(
 	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap,
 			       &nimaps, XFS_BMAPI_ENTIRE);
 	if (error) {
-		xfs_iunlock(ip, XFS_ILOCK_EXCL);
+		xfs_iunlock(ip, lockmode);
 		return error;
 	}
 
-	if ((flags & IOMAP_WRITE) && imap_needs_alloc(&imap, nimaps)) {
+	if ((flags & IOMAP_WRITE) && imap_needs_alloc(inode, &imap, nimaps)) {
 		/*
 		 * We cap the maximum length we map here to MAX_WRITEBACK_PAGES
 		 * pages to keep the chunks of work done where somewhat symmetric
@@ -994,17 +998,19 @@ xfs_file_iomap_begin(
 		 * xfs_iomap_write_direct() expects the shared lock. It
 		 * is unlocked on return.
 		 */
-		xfs_ilock_demote(ip, XFS_ILOCK_EXCL);
+		if (lockmode == XFS_ILOCK_EXCL)
+			xfs_ilock_demote(ip, lockmode);
 		error = xfs_iomap_write_direct(ip, offset, length, &imap,
 				nimaps);
 		if (error)
 			return error;
 
+		iomap->flags = IOMAP_F_NEW;
 		trace_xfs_iomap_alloc(ip, offset, length, 0, &imap);
 	} else {
 		ASSERT(nimaps);
 
-		xfs_iunlock(ip, XFS_ILOCK_EXCL);
+		xfs_iunlock(ip, lockmode);
 		trace_xfs_iomap_found(ip, offset, length, 0, &imap);
 	}
 