author		Dave Chinner <david@fromorbit.com>	2016-07-19 21:54:37 -0400
committer	Dave Chinner <david@fromorbit.com>	2016-07-19 21:54:37 -0400
commit		b47ec80bfe1eadd530a13522890d43d71eda10f8 (patch)
tree		29e95ca6aebb2f6a911fdfb33cdf8f5b794c1779
parent		bbfeb6141fcc8e2aea47b2f235cc8cc8ffb4a293 (diff)
parent		16d4d43595b4780daac8fcea6d042689124cb094 (diff)
Merge branch 'xfs-4.8-split-dax-dio' into for-next
-rw-r--r--	fs/xfs/xfs_aops.c	24
-rw-r--r--	fs/xfs/xfs_aops.h	3
-rw-r--r--	fs/xfs/xfs_file.c	232
-rw-r--r--	fs/xfs/xfs_inode.h	10
-rw-r--r--	fs/xfs/xfs_ioctl.c	22
-rw-r--r--	fs/xfs/xfs_ioctl.h	3
-rw-r--r--	fs/xfs/xfs_ioctl32.c	6
-rw-r--r--	fs/xfs/xfs_trace.h	21
8 files changed, 203 insertions, 118 deletions
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index 80714ebd54c0..b3682774a07d 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -1303,7 +1303,7 @@ xfs_get_blocks_dax_fault(
  * whereas if we have flags set we will always be called in task context
  * (i.e. from a workqueue).
  */
-STATIC int
+int
 xfs_end_io_direct_write(
 	struct kiocb		*iocb,
 	loff_t			offset,
@@ -1374,24 +1374,10 @@ xfs_vm_direct_IO(
 	struct kiocb		*iocb,
 	struct iov_iter		*iter)
 {
-	struct inode		*inode = iocb->ki_filp->f_mapping->host;
-	dio_iodone_t		*endio = NULL;
-	int			flags = 0;
-	struct block_device	*bdev;
-
-	if (iov_iter_rw(iter) == WRITE) {
-		endio = xfs_end_io_direct_write;
-		flags = DIO_ASYNC_EXTEND;
-	}
-
-	if (IS_DAX(inode)) {
-		return dax_do_io(iocb, inode, iter,
-				 xfs_get_blocks_direct, endio, 0);
-	}
-
-	bdev = xfs_find_bdev_for_inode(inode);
-	return __blockdev_direct_IO(iocb, inode, bdev, iter,
-			xfs_get_blocks_direct, endio, NULL, flags);
+	/*
+	 * We just need the method present so that open/fcntl allow direct I/O.
+	 */
+	return -EINVAL;
 }
 
 STATIC sector_t
diff --git a/fs/xfs/xfs_aops.h b/fs/xfs/xfs_aops.h
index 814aab790713..bf2d9a141a73 100644
--- a/fs/xfs/xfs_aops.h
+++ b/fs/xfs/xfs_aops.h
@@ -60,6 +60,9 @@ int xfs_get_blocks_direct(struct inode *inode, sector_t offset,
 int	xfs_get_blocks_dax_fault(struct inode *inode, sector_t offset,
 		struct buffer_head *map_bh, int create);
 
+int	xfs_end_io_direct_write(struct kiocb *iocb, loff_t offset,
+		ssize_t size, void *private);
+
 extern void xfs_count_page_state(struct page *, int *, int *);
 extern struct block_device *xfs_find_bdev_for_inode(struct inode *);
 
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index 713991c22781..0e7432558fc0 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -239,48 +239,35 @@ xfs_file_fsync(
 }
 
 STATIC ssize_t
-xfs_file_read_iter(
+xfs_file_dio_aio_read(
 	struct kiocb		*iocb,
 	struct iov_iter		*to)
 {
-	struct file		*file = iocb->ki_filp;
-	struct inode		*inode = file->f_mapping->host;
+	struct address_space	*mapping = iocb->ki_filp->f_mapping;
+	struct inode		*inode = mapping->host;
 	struct xfs_inode	*ip = XFS_I(inode);
-	struct xfs_mount	*mp = ip->i_mount;
-	size_t			size = iov_iter_count(to);
+	loff_t			isize = i_size_read(inode);
+	size_t			count = iov_iter_count(to);
+	struct iov_iter		data;
+	struct xfs_buftarg	*target;
 	ssize_t			ret = 0;
-	int			ioflags = 0;
-	xfs_fsize_t		n;
-	loff_t			pos = iocb->ki_pos;
-
-	XFS_STATS_INC(mp, xs_read_calls);
 
-	if (unlikely(iocb->ki_flags & IOCB_DIRECT))
-		ioflags |= XFS_IO_ISDIRECT;
-	if (file->f_mode & FMODE_NOCMTIME)
-		ioflags |= XFS_IO_INVIS;
-
-	if ((ioflags & XFS_IO_ISDIRECT) && !IS_DAX(inode)) {
-		xfs_buftarg_t	*target =
-			XFS_IS_REALTIME_INODE(ip) ?
-				mp->m_rtdev_targp : mp->m_ddev_targp;
-		/* DIO must be aligned to device logical sector size */
-		if ((pos | size) & target->bt_logical_sectormask) {
-			if (pos == i_size_read(inode))
-				return 0;
-			return -EINVAL;
-		}
-	}
+	trace_xfs_file_direct_read(ip, count, iocb->ki_pos);
 
-	n = mp->m_super->s_maxbytes - pos;
-	if (n <= 0 || size == 0)
-		return 0;
+	if (!count)
+		return 0; /* skip atime */
 
-	if (n < size)
-		size = n;
+	if (XFS_IS_REALTIME_INODE(ip))
+		target = ip->i_mount->m_rtdev_targp;
+	else
+		target = ip->i_mount->m_ddev_targp;
 
-	if (XFS_FORCED_SHUTDOWN(mp))
-		return -EIO;
+	/* DIO must be aligned to device logical sector size */
+	if ((iocb->ki_pos | count) & target->bt_logical_sectormask) {
+		if (iocb->ki_pos == isize)
+			return 0;
+		return -EINVAL;
+	}
 
 	/*
 	 * Locking is a bit tricky here. If we take an exclusive lock for direct
@@ -293,7 +280,7 @@ xfs_file_read_iter(
 	 * serialisation.
 	 */
 	xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);
-	if ((ioflags & XFS_IO_ISDIRECT) && inode->i_mapping->nrpages) {
+	if (mapping->nrpages) {
 		xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
 		xfs_rw_ilock(ip, XFS_IOLOCK_EXCL);
 
@@ -308,8 +295,8 @@ xfs_file_read_iter(
 		 * flush and reduce the chances of repeated iolock cycles going
 		 * forward.
 		 */
-		if (inode->i_mapping->nrpages) {
-			ret = filemap_write_and_wait(VFS_I(ip)->i_mapping);
+		if (mapping->nrpages) {
+			ret = filemap_write_and_wait(mapping);
 			if (ret) {
 				xfs_rw_iunlock(ip, XFS_IOLOCK_EXCL);
 				return ret;
@@ -320,20 +307,95 @@ xfs_file_read_iter(
 			 * we fail to invalidate a page, but this should never
 			 * happen on XFS. Warn if it does fail.
 			 */
-			ret = invalidate_inode_pages2(VFS_I(ip)->i_mapping);
+			ret = invalidate_inode_pages2(mapping);
 			WARN_ON_ONCE(ret);
 			ret = 0;
 		}
 		xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL);
 	}
 
-	trace_xfs_file_read(ip, size, pos, ioflags);
+	data = *to;
+	ret = __blockdev_direct_IO(iocb, inode, target->bt_bdev, &data,
+			xfs_get_blocks_direct, NULL, NULL, 0);
+	if (ret > 0) {
+		iocb->ki_pos += ret;
+		iov_iter_advance(to, ret);
+	}
+	xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
 
+	file_accessed(iocb->ki_filp);
+	return ret;
+}
+
+STATIC ssize_t
+xfs_file_dax_read(
+	struct kiocb		*iocb,
+	struct iov_iter		*to)
+{
+	struct address_space	*mapping = iocb->ki_filp->f_mapping;
+	struct inode		*inode = mapping->host;
+	struct xfs_inode	*ip = XFS_I(inode);
+	struct iov_iter		data = *to;
+	size_t			count = iov_iter_count(to);
+	ssize_t			ret = 0;
+
+	trace_xfs_file_dax_read(ip, count, iocb->ki_pos);
+
+	if (!count)
+		return 0; /* skip atime */
+
+	xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);
+	ret = dax_do_io(iocb, inode, &data, xfs_get_blocks_direct, NULL, 0);
+	if (ret > 0) {
+		iocb->ki_pos += ret;
+		iov_iter_advance(to, ret);
+	}
+	xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
+
+	file_accessed(iocb->ki_filp);
+	return ret;
+}
+
+STATIC ssize_t
+xfs_file_buffered_aio_read(
+	struct kiocb		*iocb,
+	struct iov_iter		*to)
+{
+	struct xfs_inode	*ip = XFS_I(file_inode(iocb->ki_filp));
+	ssize_t			ret;
+
+	trace_xfs_file_buffered_read(ip, iov_iter_count(to), iocb->ki_pos);
+
+	xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);
 	ret = generic_file_read_iter(iocb, to);
+	xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
+
+	return ret;
+}
+
+STATIC ssize_t
+xfs_file_read_iter(
+	struct kiocb		*iocb,
+	struct iov_iter		*to)
+{
+	struct inode		*inode = file_inode(iocb->ki_filp);
+	struct xfs_mount	*mp = XFS_I(inode)->i_mount;
+	ssize_t			ret = 0;
+
+	XFS_STATS_INC(mp, xs_read_calls);
+
+	if (XFS_FORCED_SHUTDOWN(mp))
+		return -EIO;
+
+	if (IS_DAX(inode))
+		ret = xfs_file_dax_read(iocb, to);
+	else if (iocb->ki_flags & IOCB_DIRECT)
+		ret = xfs_file_dio_aio_read(iocb, to);
+	else
+		ret = xfs_file_buffered_aio_read(iocb, to);
+
 	if (ret > 0)
 		XFS_STATS_ADD(mp, xs_read_bytes, ret);
-
-	xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
 	return ret;
 }
 
@@ -346,18 +408,14 @@ xfs_file_splice_read(
 	unsigned int		flags)
 {
 	struct xfs_inode	*ip = XFS_I(infilp->f_mapping->host);
-	int			ioflags = 0;
 	ssize_t			ret;
 
 	XFS_STATS_INC(ip->i_mount, xs_read_calls);
 
-	if (infilp->f_mode & FMODE_NOCMTIME)
-		ioflags |= XFS_IO_INVIS;
-
 	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
 		return -EIO;
 
-	trace_xfs_file_splice_read(ip, count, *ppos, ioflags);
+	trace_xfs_file_splice_read(ip, count, *ppos);
 
 	/*
 	 * DAX inodes cannot ues the page cache for splice, so we have to push
@@ -553,8 +611,7 @@ xfs_file_dio_aio_write(
 			mp->m_rtdev_targp : mp->m_ddev_targp;
 
 	/* DIO must be aligned to device logical sector size */
-	if (!IS_DAX(inode) &&
-	    ((iocb->ki_pos | count) & target->bt_logical_sectormask))
+	if ((iocb->ki_pos | count) & target->bt_logical_sectormask)
 		return -EINVAL;
 
 	/* "unaligned" here means not aligned to a filesystem block */
@@ -593,7 +650,7 @@ xfs_file_dio_aio_write(
 	end = iocb->ki_pos + count - 1;
 
 	/*
-	 * See xfs_file_read_iter() for why we do a full-file flush here.
+	 * See xfs_file_dio_aio_read() for why we do a full-file flush here.
 	 */
 	if (mapping->nrpages) {
 		ret = filemap_write_and_wait(VFS_I(ip)->i_mapping);
@@ -620,10 +677,12 @@ xfs_file_dio_aio_write(
 		iolock = XFS_IOLOCK_SHARED;
 	}
 
-	trace_xfs_file_direct_write(ip, count, iocb->ki_pos, 0);
+	trace_xfs_file_direct_write(ip, count, iocb->ki_pos);
 
 	data = *from;
-	ret = mapping->a_ops->direct_IO(iocb, &data);
+	ret = __blockdev_direct_IO(iocb, inode, target->bt_bdev, &data,
+			xfs_get_blocks_direct, xfs_end_io_direct_write,
+			NULL, DIO_ASYNC_EXTEND);
 
 	/* see generic_file_direct_write() for why this is necessary */
 	if (mapping->nrpages) {
@@ -640,10 +699,70 @@ out:
 	xfs_rw_iunlock(ip, iolock);
 
 	/*
-	 * No fallback to buffered IO on errors for XFS. DAX can result in
-	 * partial writes, but direct IO will either complete fully or fail.
+	 * No fallback to buffered IO on errors for XFS, direct IO will either
+	 * complete fully or fail.
+	 */
+	ASSERT(ret < 0 || ret == count);
+	return ret;
+}
+
+STATIC ssize_t
+xfs_file_dax_write(
+	struct kiocb		*iocb,
+	struct iov_iter		*from)
+{
+	struct address_space	*mapping = iocb->ki_filp->f_mapping;
+	struct inode		*inode = mapping->host;
+	struct xfs_inode	*ip = XFS_I(inode);
+	struct xfs_mount	*mp = ip->i_mount;
+	ssize_t			ret = 0;
+	int			unaligned_io = 0;
+	int			iolock;
+	struct iov_iter		data;
+
+	/* "unaligned" here means not aligned to a filesystem block */
+	if ((iocb->ki_pos & mp->m_blockmask) ||
+	    ((iocb->ki_pos + iov_iter_count(from)) & mp->m_blockmask)) {
+		unaligned_io = 1;
+		iolock = XFS_IOLOCK_EXCL;
+	} else if (mapping->nrpages) {
+		iolock = XFS_IOLOCK_EXCL;
+	} else {
+		iolock = XFS_IOLOCK_SHARED;
+	}
+	xfs_rw_ilock(ip, iolock);
+
+	ret = xfs_file_aio_write_checks(iocb, from, &iolock);
+	if (ret)
+		goto out;
+
+	/*
+	 * Yes, even DAX files can have page cache attached to them: A zeroed
+	 * page is inserted into the pagecache when we have to serve a write
+	 * fault on a hole. It should never be dirtied and can simply be
+	 * dropped from the pagecache once we get real data for the page.
 	 */
-	ASSERT(ret < 0 || ret == count || IS_DAX(VFS_I(ip)));
+	if (mapping->nrpages) {
+		ret = invalidate_inode_pages2(mapping);
+		WARN_ON_ONCE(ret);
+	}
+
+	if (iolock == XFS_IOLOCK_EXCL && !unaligned_io) {
+		xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL);
+		iolock = XFS_IOLOCK_SHARED;
+	}
+
+	trace_xfs_file_dax_write(ip, iov_iter_count(from), iocb->ki_pos);
+
+	data = *from;
+	ret = dax_do_io(iocb, inode, &data, xfs_get_blocks_direct,
+			xfs_end_io_direct_write, 0);
+	if (ret > 0) {
+		iocb->ki_pos += ret;
+		iov_iter_advance(from, ret);
+	}
+out:
+	xfs_rw_iunlock(ip, iolock);
 	return ret;
 }
 
@@ -670,8 +789,7 @@ xfs_file_buffered_aio_write(
 	current->backing_dev_info = inode_to_bdi(inode);
 
 write_retry:
-	trace_xfs_file_buffered_write(ip, iov_iter_count(from),
-				      iocb->ki_pos, 0);
+	trace_xfs_file_buffered_write(ip, iov_iter_count(from), iocb->ki_pos);
 	ret = iomap_file_buffered_write(iocb, from, &xfs_iomap_ops);
 	if (likely(ret >= 0))
 		iocb->ki_pos += ret;
@@ -726,7 +844,9 @@ xfs_file_write_iter(
 	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
 		return -EIO;
 
-	if ((iocb->ki_flags & IOCB_DIRECT) || IS_DAX(inode))
+	if (IS_DAX(inode))
+		ret = xfs_file_dax_write(iocb, from);
+	else if (iocb->ki_flags & IOCB_DIRECT)
 		ret = xfs_file_dio_aio_write(iocb, from);
 	else
 		ret = xfs_file_buffered_aio_write(iocb, from);
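The sector-alignment rule enforced above in xfs_file_dio_aio_read() and xfs_file_dio_aio_write() means an O_DIRECT caller gets -EINVAL unless the file offset and I/O length are multiples of the device's logical sector size, and in practice the user buffer must be aligned as well. A minimal userspace sketch of a conforming direct read follows; it is illustrative only: the file path and the hard-coded 4096-byte alignment are assumptions, and a real program would query the actual requirements, for example via the XFS_IOC_DIOINFO ioctl handled in xfs_file_ioctl() above.

/* Illustrative sketch only: an aligned O_DIRECT read. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	const size_t align = 4096;	/* assumed logical sector size */
	const size_t len = 16 * align;	/* length stays sector-aligned */
	const off_t pos = 0;		/* offset stays sector-aligned */
	void *buf = NULL;
	ssize_t n;
	int fd;

	fd = open("/mnt/xfs/testfile", O_RDONLY | O_DIRECT);	/* hypothetical path */
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* O_DIRECT also expects a suitably aligned user buffer. */
	if (posix_memalign(&buf, align, len)) {
		fprintf(stderr, "posix_memalign failed\n");
		close(fd);
		return 1;
	}

	/* A misaligned pos or len would fail the kernel's sectormask check with -EINVAL. */
	n = pread(fd, buf, len, pos);
	if (n < 0)
		perror("pread");
	else
		printf("read %zd bytes\n", n);

	free(buf);
	close(fd);
	return 0;
}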
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index 0c19d3d05a91..8eb78ec4a6e2 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -473,14 +473,4 @@ do { \
 
 extern struct kmem_zone	*xfs_inode_zone;
 
-/*
- * Flags for read/write calls
- */
-#define XFS_IO_ISDIRECT	0x00001 /* bypass page cache */
-#define XFS_IO_INVIS	0x00002 /* don't update inode timestamps */
-
-#define XFS_IO_FLAGS \
-	{ XFS_IO_ISDIRECT,	"DIRECT" }, \
-	{ XFS_IO_INVIS,		"INVIS"}
-
 #endif	/* __XFS_INODE_H__ */
diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
index 408f3ad348ab..9a7c87809d3b 100644
--- a/fs/xfs/xfs_ioctl.c
+++ b/fs/xfs/xfs_ioctl.c
@@ -595,13 +595,12 @@ xfs_attrmulti_by_handle(
 
 int
 xfs_ioc_space(
-	struct xfs_inode	*ip,
-	struct inode		*inode,
 	struct file		*filp,
-	int			ioflags,
 	unsigned int		cmd,
 	xfs_flock64_t		*bf)
 {
+	struct inode		*inode = file_inode(filp);
+	struct xfs_inode	*ip = XFS_I(inode);
 	struct iattr		iattr;
 	enum xfs_prealloc_flags	flags = 0;
 	uint			iolock = XFS_IOLOCK_EXCL;
@@ -626,7 +625,7 @@ xfs_ioc_space(
 
 	if (filp->f_flags & O_DSYNC)
 		flags |= XFS_PREALLOC_SYNC;
-	if (ioflags & XFS_IO_INVIS)
+	if (filp->f_mode & FMODE_NOCMTIME)
 		flags |= XFS_PREALLOC_INVISIBLE;
 
 	error = mnt_want_write_file(filp);
@@ -1464,8 +1463,7 @@ xfs_getbmap_format(void **ap, struct getbmapx *bmv, int *full)
 
 STATIC int
 xfs_ioc_getbmap(
-	struct xfs_inode	*ip,
-	int			ioflags,
+	struct file		*file,
 	unsigned int		cmd,
 	void			__user *arg)
 {
@@ -1479,10 +1477,10 @@ xfs_ioc_getbmap(
 		return -EINVAL;
 
 	bmx.bmv_iflags = (cmd == XFS_IOC_GETBMAPA ? BMV_IF_ATTRFORK : 0);
-	if (ioflags & XFS_IO_INVIS)
+	if (file->f_mode & FMODE_NOCMTIME)
 		bmx.bmv_iflags |= BMV_IF_NO_DMAPI_READ;
 
-	error = xfs_getbmap(ip, &bmx, xfs_getbmap_format,
+	error = xfs_getbmap(XFS_I(file_inode(file)), &bmx, xfs_getbmap_format,
 			    (__force struct getbmap *)arg+1);
 	if (error)
 		return error;
@@ -1630,12 +1628,8 @@ xfs_file_ioctl(
 	struct xfs_inode	*ip = XFS_I(inode);
 	struct xfs_mount	*mp = ip->i_mount;
 	void			__user *arg = (void __user *)p;
-	int			ioflags = 0;
 	int			error;
 
-	if (filp->f_mode & FMODE_NOCMTIME)
-		ioflags |= XFS_IO_INVIS;
-
 	trace_xfs_file_ioctl(ip);
 
 	switch (cmd) {
@@ -1654,7 +1648,7 @@ xfs_file_ioctl(
 
 		if (copy_from_user(&bf, arg, sizeof(bf)))
 			return -EFAULT;
-		return xfs_ioc_space(ip, inode, filp, ioflags, cmd, &bf);
+		return xfs_ioc_space(filp, cmd, &bf);
 	}
 	case XFS_IOC_DIOINFO: {
 		struct dioattr		da;
@@ -1713,7 +1707,7 @@ xfs_file_ioctl(
 
 	case XFS_IOC_GETBMAP:
 	case XFS_IOC_GETBMAPA:
-		return xfs_ioc_getbmap(ip, ioflags, cmd, arg);
+		return xfs_ioc_getbmap(filp, cmd, arg);
 
 	case XFS_IOC_GETBMAPX:
 		return xfs_ioc_getbmapx(ip, arg);
diff --git a/fs/xfs/xfs_ioctl.h b/fs/xfs/xfs_ioctl.h
index 77c02c7900b6..8b52881bfd90 100644
--- a/fs/xfs/xfs_ioctl.h
+++ b/fs/xfs/xfs_ioctl.h
@@ -20,10 +20,7 @@
 
 extern int
 xfs_ioc_space(
-	struct xfs_inode	*ip,
-	struct inode		*inode,
 	struct file		*filp,
-	int			ioflags,
 	unsigned int		cmd,
 	xfs_flock64_t		*bf);
 
diff --git a/fs/xfs/xfs_ioctl32.c b/fs/xfs/xfs_ioctl32.c
index 1a05d8ae327d..321f57721b92 100644
--- a/fs/xfs/xfs_ioctl32.c
+++ b/fs/xfs/xfs_ioctl32.c
@@ -532,12 +532,8 @@ xfs_file_compat_ioctl(
 	struct xfs_inode	*ip = XFS_I(inode);
 	struct xfs_mount	*mp = ip->i_mount;
 	void			__user *arg = (void __user *)p;
-	int			ioflags = 0;
 	int			error;
 
-	if (filp->f_mode & FMODE_NOCMTIME)
-		ioflags |= XFS_IO_INVIS;
-
 	trace_xfs_file_compat_ioctl(ip);
 
 	switch (cmd) {
@@ -589,7 +585,7 @@ xfs_file_compat_ioctl(
 		if (xfs_compat_flock64_copyin(&bf, arg))
 			return -EFAULT;
 		cmd = _NATIVE_IOC(cmd, struct xfs_flock64);
-		return xfs_ioc_space(ip, inode, filp, ioflags, cmd, &bf);
+		return xfs_ioc_space(filp, cmd, &bf);
 	}
 	case XFS_IOC_FSGEOMETRY_V1_32:
 		return xfs_compat_ioc_fsgeometry_v1(mp, arg);
diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h
index 6787a9f96526..145169093fe0 100644
--- a/fs/xfs/xfs_trace.h
+++ b/fs/xfs/xfs_trace.h
@@ -1135,15 +1135,14 @@ TRACE_EVENT(xfs_log_assign_tail_lsn,
 )
 
 DECLARE_EVENT_CLASS(xfs_file_class,
-	TP_PROTO(struct xfs_inode *ip, size_t count, loff_t offset, int flags),
-	TP_ARGS(ip, count, offset, flags),
+	TP_PROTO(struct xfs_inode *ip, size_t count, loff_t offset),
+	TP_ARGS(ip, count, offset),
 	TP_STRUCT__entry(
 		__field(dev_t, dev)
 		__field(xfs_ino_t, ino)
 		__field(xfs_fsize_t, size)
 		__field(loff_t, offset)
 		__field(size_t, count)
-		__field(int, flags)
 	),
 	TP_fast_assign(
 		__entry->dev = VFS_I(ip)->i_sb->s_dev;
@@ -1151,25 +1150,25 @@ DECLARE_EVENT_CLASS(xfs_file_class,
 		__entry->size = ip->i_d.di_size;
 		__entry->offset = offset;
 		__entry->count = count;
-		__entry->flags = flags;
 	),
-	TP_printk("dev %d:%d ino 0x%llx size 0x%llx "
-		  "offset 0x%llx count 0x%zx ioflags %s",
+	TP_printk("dev %d:%d ino 0x%llx size 0x%llx offset 0x%llx count 0x%zx",
 		  MAJOR(__entry->dev), MINOR(__entry->dev),
 		  __entry->ino,
 		  __entry->size,
 		  __entry->offset,
-		  __entry->count,
-		  __print_flags(__entry->flags, "|", XFS_IO_FLAGS))
+		  __entry->count)
 )
 
 #define DEFINE_RW_EVENT(name) \
 DEFINE_EVENT(xfs_file_class, name, \
-	TP_PROTO(struct xfs_inode *ip, size_t count, loff_t offset, int flags), \
-	TP_ARGS(ip, count, offset, flags))
-DEFINE_RW_EVENT(xfs_file_read);
+	TP_PROTO(struct xfs_inode *ip, size_t count, loff_t offset), \
+	TP_ARGS(ip, count, offset))
+DEFINE_RW_EVENT(xfs_file_buffered_read);
+DEFINE_RW_EVENT(xfs_file_direct_read);
+DEFINE_RW_EVENT(xfs_file_dax_read);
 DEFINE_RW_EVENT(xfs_file_buffered_write);
 DEFINE_RW_EVENT(xfs_file_direct_write);
+DEFINE_RW_EVENT(xfs_file_dax_write);
 DEFINE_RW_EVENT(xfs_file_splice_read);
 
 DECLARE_EVENT_CLASS(xfs_page_class,