author    Lachlan McIlroy <lachlan@sgi.com>  2007-10-11 03:34:33 -0400
committer Lachlan McIlroy <lachlan@redback.melbourne.sgi.com>  2008-02-07 00:44:14 -0500
commit    541d7d3c4b31e2b0ac846fe6d2eb5cdbe1353095 (patch)
tree      d8c9cf9cf75fd3d23ebc19e5f6b646a4d807b72c /fs/xfs/linux-2.6/xfs_lrw.c
parent    21a62542b6d7f726d6c1d2cfbfa084f721ba4a26 (diff)
[XFS] kill unnecessary ioops indirection
Currently there is an indirection called ioops in the XFS data I/O path. Various functions are called through function pointers, but there is no coherence in what this is for, and for XFS itself it is entirely unused. This patch removes the indirection, which significantly reduces the source and binary size of XFS while making maintenance easier.

SGI-PV: 970841
SGI-Modid: xfs-linux-melb:xfs-kern:29737a
Signed-off-by: Lachlan McIlroy <lachlan@sgi.com>
Signed-off-by: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Tim Shimmin <tes@sgi.com>
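For readers unfamiliar with the ioops layer, the sketch below shows the general shape of the indirection being removed. It is a minimal, hypothetical illustration: the names io_ops, map_blocks and real_map_blocks are invented, and the real ioops structure and the XFS_BMAPI()/XFS_ILOCK() wrappers visible in the diff are more involved. The point is only that calls were routed through a table of function pointers even though XFS ever installed a single set of implementations, so the patch replaces each indirect call with a direct call such as xfs_bmapi() or xfs_ilock().

/*
 * Minimal, hypothetical sketch of the function-pointer indirection this
 * patch removes.  Names below (io_ops, map_blocks, real_map_blocks) are
 * invented for illustration only.
 */
#include <stdio.h>

struct io_mapping {
	long	start_block;
	long	block_count;
};

/* Ops table: every data-I/O operation goes through a function pointer. */
struct io_ops {
	int	(*map_blocks)(void *inode, long offset, long count,
			      struct io_mapping *map);
};

/* The single real implementation the table ever points at. */
static int real_map_blocks(void *inode, long offset, long count,
			   struct io_mapping *map)
{
	(void)inode;			/* placeholder mapping logic */
	map->start_block = offset;
	map->block_count = count;
	return 0;
}

static const struct io_ops ioops = {
	.map_blocks	= real_map_blocks,
};

/* Before: indirect call through the ops table (XFS_BMAPI() style). */
static int zero_range_old(void *inode, long offset, long count)
{
	struct io_mapping map;

	return ioops.map_blocks(inode, offset, count, &map);
}

/* After: call the one implementation directly (xfs_bmapi() style). */
static int zero_range_new(void *inode, long offset, long count)
{
	struct io_mapping map;

	return real_map_blocks(inode, offset, count, &map);
}

int main(void)
{
	printf("%d %d\n", zero_range_old(NULL, 0, 8),
			  zero_range_new(NULL, 0, 8));
	return 0;
}

Since only one implementation was ever installed, the direct form behaves identically while shedding the pointer table, the wrapper macros, and pass-through helpers such as the xfs_bmap() function deleted at the end of this diff.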
Diffstat (limited to 'fs/xfs/linux-2.6/xfs_lrw.c')
-rw-r--r--  fs/xfs/linux-2.6/xfs_lrw.c  |  56
1 file changed, 17 insertions(+), 39 deletions(-)
diff --git a/fs/xfs/linux-2.6/xfs_lrw.c b/fs/xfs/linux-2.6/xfs_lrw.c
index d6a8dddb2268..0abc7d0586c9 100644
--- a/fs/xfs/linux-2.6/xfs_lrw.c
+++ b/fs/xfs/linux-2.6/xfs_lrw.c
@@ -131,7 +131,7 @@ xfs_inval_cached_trace(
  */
 STATIC int
 xfs_iozero(
-	struct inode		*ip,	/* inode */
+	struct xfs_inode	*ip,	/* inode */
 	loff_t			pos,	/* offset in file */
 	size_t			count)	/* size of data to zero */
 {
@@ -139,7 +139,7 @@ xfs_iozero(
 	struct address_space *mapping;
 	int		status;
 
-	mapping = ip->i_mapping;
+	mapping = ip->i_vnode->i_mapping;
 	do {
 		unsigned offset, bytes;
 		void	*fsdata;
@@ -389,20 +389,19 @@ xfs_splice_write(
  */
 STATIC int				/* error (positive) */
 xfs_zero_last_block(
-	struct inode	*ip,
-	xfs_iocore_t	*io,
+	xfs_inode_t	*ip,
 	xfs_fsize_t	offset,
 	xfs_fsize_t	isize)
 {
 	xfs_fileoff_t	last_fsb;
-	xfs_mount_t	*mp = io->io_mount;
+	xfs_mount_t	*mp = ip->i_mount;
 	int		nimaps;
 	int		zero_offset;
 	int		zero_len;
 	int		error = 0;
 	xfs_bmbt_irec_t	imap;
 
-	ASSERT(ismrlocked(io->io_lock, MR_UPDATE) != 0);
+	ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE) != 0);
 
 	zero_offset = XFS_B_FSB_OFFSET(mp, isize);
 	if (zero_offset == 0) {
@@ -415,7 +414,7 @@ xfs_zero_last_block(
 
 	last_fsb = XFS_B_TO_FSBT(mp, isize);
 	nimaps = 1;
-	error = XFS_BMAPI(mp, NULL, io, last_fsb, 1, 0, NULL, 0, &imap,
+	error = xfs_bmapi(NULL, ip, last_fsb, 1, 0, NULL, 0, &imap,
 			  &nimaps, NULL, NULL);
 	if (error) {
 		return error;
@@ -433,14 +432,14 @@ xfs_zero_last_block(
 	 * out sync. We need to drop the ilock while we do this so we
 	 * don't deadlock when the buffer cache calls back to us.
 	 */
-	XFS_IUNLOCK(mp, io, XFS_ILOCK_EXCL| XFS_EXTSIZE_RD);
+	xfs_iunlock(ip, XFS_ILOCK_EXCL| XFS_EXTSIZE_RD);
 
 	zero_len = mp->m_sb.sb_blocksize - zero_offset;
 	if (isize + zero_len > offset)
 		zero_len = offset - isize;
 	error = xfs_iozero(ip, isize, zero_len);
 
-	XFS_ILOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);
+	xfs_ilock(ip, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);
 	ASSERT(error >= 0);
 	return error;
 }
@@ -458,12 +457,11 @@ xfs_zero_last_block(
 
 int					/* error (positive) */
 xfs_zero_eof(
-	bhv_vnode_t	*vp,
-	xfs_iocore_t	*io,
+	xfs_inode_t	*ip,
 	xfs_off_t	offset,		/* starting I/O offset */
 	xfs_fsize_t	isize)		/* current inode size */
 {
-	struct inode	*ip = vn_to_inode(vp);
+	xfs_iocore_t	*io = &ip->i_iocore;
 	xfs_fileoff_t	start_zero_fsb;
 	xfs_fileoff_t	end_zero_fsb;
 	xfs_fileoff_t	zero_count_fsb;
@@ -483,7 +481,7 @@ xfs_zero_eof(
 	 * First handle zeroing the block on which isize resides.
 	 * We only zero a part of that block so it is handled specially.
 	 */
-	error = xfs_zero_last_block(ip, io, offset, isize);
+	error = xfs_zero_last_block(ip, offset, isize);
 	if (error) {
 		ASSERT(ismrlocked(io->io_lock, MR_UPDATE));
 		ASSERT(ismrlocked(io->io_iolock, MR_UPDATE));
@@ -514,7 +512,7 @@ xfs_zero_eof(
 	while (start_zero_fsb <= end_zero_fsb) {
 		nimaps = 1;
 		zero_count_fsb = end_zero_fsb - start_zero_fsb + 1;
-		error = XFS_BMAPI(mp, NULL, io, start_zero_fsb, zero_count_fsb,
+		error = xfs_bmapi(NULL, ip, start_zero_fsb, zero_count_fsb,
 				  0, NULL, 0, &imap, &nimaps, NULL, NULL);
 		if (error) {
 			ASSERT(ismrlocked(io->io_lock, MR_UPDATE));
@@ -542,7 +540,7 @@ xfs_zero_eof(
 		 * Drop the inode lock while we're doing the I/O.
 		 * We'll still have the iolock to protect us.
 		 */
-		XFS_IUNLOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);
+		xfs_iunlock(ip, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);
 
 		zero_off = XFS_FSB_TO_B(mp, start_zero_fsb);
 		zero_len = XFS_FSB_TO_B(mp, imap.br_blockcount);
@@ -558,14 +556,13 @@ xfs_zero_eof(
 		start_zero_fsb = imap.br_startoff + imap.br_blockcount;
 		ASSERT(start_zero_fsb <= (end_zero_fsb + 1));
 
-		XFS_ILOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);
+		xfs_ilock(ip, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);
 	}
 
 	return 0;
 
 out_lock:
-
-	XFS_ILOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);
+	xfs_ilock(ip, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);
 	ASSERT(error >= 0);
 	return error;
 }
@@ -706,7 +703,7 @@ start:
 	 */
 
 	if (pos > xip->i_size) {
-		error = xfs_zero_eof(vp, io, pos, xip->i_size);
+		error = xfs_zero_eof(xip, pos, xip->i_size);
 		if (error) {
 			xfs_iunlock(xip, XFS_ILOCK_EXCL);
 			goto out_unlock_internal;
@@ -751,7 +748,7 @@ retry:
 
 	if (need_i_mutex) {
 		/* demote the lock now the cached pages are gone */
-		XFS_ILOCK_DEMOTE(mp, io, XFS_IOLOCK_EXCL);
+		xfs_ilock_demote(xip, XFS_IOLOCK_EXCL);
 		mutex_unlock(&inode->i_mutex);
 
 		iolock = XFS_IOLOCK_SHARED;
@@ -894,25 +891,6 @@ xfs_bdstrat_cb(struct xfs_buf *bp)
 	}
 }
 
-
-int
-xfs_bmap(
-	xfs_inode_t	*ip,
-	xfs_off_t	offset,
-	ssize_t		count,
-	int		flags,
-	xfs_iomap_t	*iomapp,
-	int		*niomaps)
-{
-	xfs_iocore_t	*io = &ip->i_iocore;
-
-	ASSERT((ip->i_d.di_mode & S_IFMT) == S_IFREG);
-	ASSERT(((ip->i_d.di_flags & XFS_DIFLAG_REALTIME) != 0) ==
-	       ((ip->i_iocore.io_flags & XFS_IOCORE_RT) != 0));
-
-	return xfs_iomap(io, offset, count, flags, iomapp, niomaps);
-}
-
 /*
  * Wrapper around bdstrat so that we can stop data
  * from going to disk in case we are shutting down the filesystem.