diff options
| author | Christoph Hellwig <hch@lst.de> | 2016-07-19 21:35:42 -0400 |
|---|---|---|
| committer | Dave Chinner <david@fromorbit.com> | 2016-07-19 21:35:42 -0400 |
| commit | bbc5a740c4f27a9732a3a3decf3186b4bce21108 (patch) | |
| tree | 9e0f30810d55060427d5d01aae1d2b3438affb52 | |
| parent | cf810712cc82cbfab8f08a46ca6c0289d386a303 (diff) | |
xfs: split xfs_file_read_iter into buffered and direct I/O helpers
Similar to what we did on the write side a while ago.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Dave Chinner <david@fromorbit.com>
| -rw-r--r-- | fs/xfs/xfs_file.c | 83 |
1 file changed, 57 insertions(+), 26 deletions(-)
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c index 7ec8225b7fd2..fdb123ffd616 100644 --- a/fs/xfs/xfs_file.c +++ b/fs/xfs/xfs_file.c | |||
| @@ -282,35 +282,33 @@ xfs_file_fsync( | |||
| 282 | } | 282 | } |
| 283 | 283 | ||
| 284 | STATIC ssize_t | 284 | STATIC ssize_t |
| 285 | xfs_file_read_iter( | 285 | xfs_file_dio_aio_read( |
| 286 | struct kiocb *iocb, | 286 | struct kiocb *iocb, |
| 287 | struct iov_iter *to) | 287 | struct iov_iter *to) |
| 288 | { | 288 | { |
| 289 | struct file *file = iocb->ki_filp; | 289 | struct address_space *mapping = iocb->ki_filp->f_mapping; |
| 290 | struct inode *inode = file->f_mapping->host; | 290 | struct inode *inode = mapping->host; |
| 291 | struct xfs_inode *ip = XFS_I(inode); | 291 | struct xfs_inode *ip = XFS_I(inode); |
| 292 | struct xfs_mount *mp = ip->i_mount; | 292 | size_t count = iov_iter_count(to); |
| 293 | size_t size = iov_iter_count(to); | 293 | struct xfs_buftarg *target; |
| 294 | ssize_t ret = 0; | 294 | ssize_t ret = 0; |
| 295 | loff_t pos = iocb->ki_pos; | ||
| 296 | 295 | ||
| 297 | XFS_STATS_INC(mp, xs_read_calls); | 296 | trace_xfs_file_direct_read(ip, count, iocb->ki_pos); |
| 298 | 297 | ||
| 299 | if ((iocb->ki_flags & IOCB_DIRECT) && !IS_DAX(inode)) { | 298 | if (XFS_IS_REALTIME_INODE(ip)) |
| 300 | xfs_buftarg_t *target = | 299 | target = ip->i_mount->m_rtdev_targp; |
| 301 | XFS_IS_REALTIME_INODE(ip) ? | 300 | else |
| 302 | mp->m_rtdev_targp : mp->m_ddev_targp; | 301 | target = ip->i_mount->m_ddev_targp; |
| 302 | |||
| 303 | if (!IS_DAX(inode)) { | ||
| 303 | /* DIO must be aligned to device logical sector size */ | 304 | /* DIO must be aligned to device logical sector size */ |
| 304 | if ((pos | size) & target->bt_logical_sectormask) { | 305 | if ((iocb->ki_pos | count) & target->bt_logical_sectormask) { |
| 305 | if (pos == i_size_read(inode)) | 306 | if (iocb->ki_pos == i_size_read(inode)) |
| 306 | return 0; | 307 | return 0; |
| 307 | return -EINVAL; | 308 | return -EINVAL; |
| 308 | } | 309 | } |
| 309 | } | 310 | } |
| 310 | 311 | ||
| 311 | if (XFS_FORCED_SHUTDOWN(mp)) | ||
| 312 | return -EIO; | ||
| 313 | |||
| 314 | /* | 312 | /* |
| 315 | * Locking is a bit tricky here. If we take an exclusive lock for direct | 313 | * Locking is a bit tricky here. If we take an exclusive lock for direct |
| 316 | * IO, we effectively serialise all new concurrent read IO to this file | 314 | * IO, we effectively serialise all new concurrent read IO to this file |
| @@ -322,7 +320,7 @@ xfs_file_read_iter( | |||
| 322 | * serialisation. | 320 | * serialisation. |
| 323 | */ | 321 | */ |
| 324 | xfs_rw_ilock(ip, XFS_IOLOCK_SHARED); | 322 | xfs_rw_ilock(ip, XFS_IOLOCK_SHARED); |
| 325 | if ((iocb->ki_flags & IOCB_DIRECT) && inode->i_mapping->nrpages) { | 323 | if (mapping->nrpages) { |
| 326 | xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED); | 324 | xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED); |
| 327 | xfs_rw_ilock(ip, XFS_IOLOCK_EXCL); | 325 | xfs_rw_ilock(ip, XFS_IOLOCK_EXCL); |
| 328 | 326 | ||
| @@ -337,8 +335,8 @@ xfs_file_read_iter( | |||
| 337 | * flush and reduce the chances of repeated iolock cycles going | 335 | * flush and reduce the chances of repeated iolock cycles going |
| 338 | * forward. | 336 | * forward. |
| 339 | */ | 337 | */ |
| 340 | if (inode->i_mapping->nrpages) { | 338 | if (mapping->nrpages) { |
| 341 | ret = filemap_write_and_wait(VFS_I(ip)->i_mapping); | 339 | ret = filemap_write_and_wait(mapping); |
| 342 | if (ret) { | 340 | if (ret) { |
| 343 | xfs_rw_iunlock(ip, XFS_IOLOCK_EXCL); | 341 | xfs_rw_iunlock(ip, XFS_IOLOCK_EXCL); |
| 344 | return ret; | 342 | return ret; |
| @@ -349,23 +347,56 @@ xfs_file_read_iter( | |||
| 349 | * we fail to invalidate a page, but this should never | 347 | * we fail to invalidate a page, but this should never |
| 350 | * happen on XFS. Warn if it does fail. | 348 | * happen on XFS. Warn if it does fail. |
| 351 | */ | 349 | */ |
| 352 | ret = invalidate_inode_pages2(VFS_I(ip)->i_mapping); | 350 | ret = invalidate_inode_pages2(mapping); |
| 353 | WARN_ON_ONCE(ret); | 351 | WARN_ON_ONCE(ret); |
| 354 | ret = 0; | 352 | ret = 0; |
| 355 | } | 353 | } |
| 356 | xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL); | 354 | xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL); |
| 357 | } | 355 | } |
| 358 | 356 | ||
| 357 | ret = generic_file_read_iter(iocb, to); | ||
| 358 | xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED); | ||
| 359 | |||
| 360 | return ret; | ||
| 361 | } | ||
| 362 | |||
| 363 | STATIC ssize_t | ||
| 364 | xfs_file_buffered_aio_read( | ||
| 365 | struct kiocb *iocb, | ||
| 366 | struct iov_iter *to) | ||
| 367 | { | ||
| 368 | struct xfs_inode *ip = XFS_I(file_inode(iocb->ki_filp)); | ||
| 369 | ssize_t ret; | ||
| 370 | |||
| 371 | trace_xfs_file_buffered_read(ip, iov_iter_count(to), iocb->ki_pos); | ||
| 372 | |||
| 373 | xfs_rw_ilock(ip, XFS_IOLOCK_SHARED); | ||
| 374 | ret = generic_file_read_iter(iocb, to); | ||
| 375 | xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED); | ||
| 376 | |||
| 377 | return ret; | ||
| 378 | } | ||
| 379 | |||
| 380 | STATIC ssize_t | ||
| 381 | xfs_file_read_iter( | ||
| 382 | struct kiocb *iocb, | ||
| 383 | struct iov_iter *to) | ||
| 384 | { | ||
| 385 | struct xfs_mount *mp = XFS_I(file_inode(iocb->ki_filp))->i_mount; | ||
| 386 | ssize_t ret = 0; | ||
| 387 | |||
| 388 | XFS_STATS_INC(mp, xs_read_calls); | ||
| 389 | |||
| 390 | if (XFS_FORCED_SHUTDOWN(mp)) | ||
| 391 | return -EIO; | ||
| 392 | |||
| 359 | if (iocb->ki_flags & IOCB_DIRECT) | 393 | if (iocb->ki_flags & IOCB_DIRECT) |
| 360 | trace_xfs_file_direct_read(ip, size, pos); | 394 | ret = xfs_file_dio_aio_read(iocb, to); |
| 361 | else | 395 | else |
| 362 | trace_xfs_file_buffered_read(ip, size, pos); | 396 | ret = xfs_file_buffered_aio_read(iocb, to); |
| 363 | 397 | ||
| 364 | ret = generic_file_read_iter(iocb, to); | ||
| 365 | if (ret > 0) | 398 | if (ret > 0) |
| 366 | XFS_STATS_ADD(mp, xs_read_bytes, ret); | 399 | XFS_STATS_ADD(mp, xs_read_bytes, ret); |
| 367 | |||
| 368 | xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED); | ||
| 369 | return ret; | 400 | return ret; |
| 370 | } | 401 | } |
| 371 | 402 | ||
| @@ -747,7 +778,7 @@ xfs_file_dio_aio_write( | |||
| 747 | end = iocb->ki_pos + count - 1; | 778 | end = iocb->ki_pos + count - 1; |
| 748 | 779 | ||
| 749 | /* | 780 | /* |
| 750 | * See xfs_file_read_iter() for why we do a full-file flush here. | 781 | * See xfs_file_dio_aio_read() for why we do a full-file flush here. |
| 751 | */ | 782 | */ |
| 752 | if (mapping->nrpages) { | 783 | if (mapping->nrpages) { |
| 753 | ret = filemap_write_and_wait(VFS_I(ip)->i_mapping); | 784 | ret = filemap_write_and_wait(VFS_I(ip)->i_mapping); |
