Diffstat (limited to 'fs/xfs/xfs_vnodeops.c')
-rw-r--r--  fs/xfs/xfs_vnodeops.c  294
1 file changed, 71 insertions(+), 223 deletions(-)
diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c
index b572f7e840e0..9d376be0ea38 100644
--- a/fs/xfs/xfs_vnodeops.c
+++ b/fs/xfs/xfs_vnodeops.c
@@ -53,6 +53,7 @@
 #include "xfs_log_priv.h"
 #include "xfs_filestream.h"
 #include "xfs_vnodeops.h"
+#include "xfs_trace.h"
 
 int
 xfs_setattr(
@@ -69,7 +70,6 @@ xfs_setattr(
 	uint			commit_flags=0;
 	uid_t			uid=0, iuid=0;
 	gid_t			gid=0, igid=0;
-	int			timeflags = 0;
 	struct xfs_dquot	*udqp, *gdqp, *olddquot1, *olddquot2;
 	int			need_iolock = 1;
 
@@ -134,16 +134,13 @@ xfs_setattr(
 	if (flags & XFS_ATTR_NOLOCK)
 		need_iolock = 0;
 	if (!(mask & ATTR_SIZE)) {
-		if ((mask != (ATTR_CTIME|ATTR_ATIME|ATTR_MTIME)) ||
-		    (mp->m_flags & XFS_MOUNT_WSYNC)) {
-			tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_NOT_SIZE);
-			commit_flags = 0;
-			if ((code = xfs_trans_reserve(tp, 0,
-						XFS_ICHANGE_LOG_RES(mp), 0,
-						0, 0))) {
-				lock_flags = 0;
-				goto error_return;
-			}
+		tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_NOT_SIZE);
+		commit_flags = 0;
+		code = xfs_trans_reserve(tp, 0, XFS_ICHANGE_LOG_RES(mp),
+					 0, 0, 0);
+		if (code) {
+			lock_flags = 0;
+			goto error_return;
 		}
 	} else {
 		if (DM_EVENT_ENABLED(ip, DM_EVENT_TRUNCATE) &&
@@ -259,7 +256,7 @@ xfs_setattr(
 		    iattr->ia_size > ip->i_d.di_size) {
 			code = xfs_flush_pages(ip,
 					ip->i_d.di_size, iattr->ia_size,
-					XFS_B_ASYNC, FI_NONE);
+					XBF_ASYNC, FI_NONE);
 		}
 
 		/* wait for all I/O to complete */
@@ -294,15 +291,23 @@ xfs_setattr(
 		 * or we are explicitly asked to change it. This handles
 		 * the semantic difference between truncate() and ftruncate()
 		 * as implemented in the VFS.
+		 *
+		 * The regular truncate() case without ATTR_CTIME and ATTR_MTIME
+		 * is a special case where we need to update the times despite
+		 * not having these flags set. For all other operations the
+		 * VFS set these flags explicitly if it wants a timestamp
+		 * update.
 		 */
-		if (iattr->ia_size != ip->i_size || (mask & ATTR_CTIME))
-			timeflags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
+		if (iattr->ia_size != ip->i_size &&
+		    (!(mask & (ATTR_CTIME | ATTR_MTIME)))) {
+			iattr->ia_ctime = iattr->ia_mtime =
+				current_fs_time(inode->i_sb);
+			mask |= ATTR_CTIME | ATTR_MTIME;
+		}
 
 		if (iattr->ia_size > ip->i_size) {
 			ip->i_d.di_size = iattr->ia_size;
 			ip->i_size = iattr->ia_size;
-			if (!(flags & XFS_ATTR_DMI))
-				xfs_ichgtime(ip, XFS_ICHGTIME_CHG);
 			xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
 		} else if (iattr->ia_size <= ip->i_size ||
 			   (iattr->ia_size == 0 && ip->i_d.di_nextents)) {
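
The rule encoded by the hunk above: when the size changes and the caller did not pass ATTR_CTIME or ATTR_MTIME (the plain truncate() path), xfs_setattr now stamps the current time itself and widens the mask, so the common time-update code later in the function covers both truncate() and ftruncate(). A minimal user-space sketch of that mask rule follows; the struct, flag values and helper below are illustrative stand-ins, not the kernel definitions.

#include <stdio.h>
#include <time.h>

#define ATTR_SIZE	(1 << 0)	/* illustrative values, not the real VFS ones */
#define ATTR_CTIME	(1 << 1)
#define ATTR_MTIME	(1 << 2)

struct demo_iattr {			/* stand-in for struct iattr */
	unsigned int	ia_valid;	/* mask of attributes to change */
	long long	ia_size;
	time_t		ia_ctime;
	time_t		ia_mtime;
};

static void demo_size_change(struct demo_iattr *ia, long long old_size)
{
	if (ia->ia_size != old_size &&
	    !(ia->ia_valid & (ATTR_CTIME | ATTR_MTIME))) {
		/* truncate(): no times supplied, so stamp "now" ourselves */
		ia->ia_ctime = ia->ia_mtime = time(NULL);
		ia->ia_valid |= ATTR_CTIME | ATTR_MTIME;
	}
	/* ftruncate()/utimes(): the caller already put the times in the mask */
}

int main(void)
{
	struct demo_iattr ia = { .ia_valid = ATTR_SIZE, .ia_size = 0 };

	demo_size_change(&ia, 4096);
	printf("ctime/mtime now in the mask: %s\n",
	       (ia.ia_valid & ATTR_CTIME) ? "yes" : "no");
	return 0;
}
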
@@ -373,9 +378,6 @@ xfs_setattr(
 			ip->i_d.di_gid = gid;
 			inode->i_gid = gid;
 		}
-
-		xfs_trans_log_inode (tp, ip, XFS_ILOG_CORE);
-		timeflags |= XFS_ICHGTIME_CHG;
 	}
 
 	/*
@@ -392,51 +394,37 @@ xfs_setattr(
 
 		inode->i_mode &= S_IFMT;
 		inode->i_mode |= mode & ~S_IFMT;
-
-		xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
-		timeflags |= XFS_ICHGTIME_CHG;
 	}
 
 	/*
 	 * Change file access or modified times.
 	 */
-	if (mask & (ATTR_ATIME|ATTR_MTIME)) {
-		if (mask & ATTR_ATIME) {
-			inode->i_atime = iattr->ia_atime;
-			ip->i_d.di_atime.t_sec = iattr->ia_atime.tv_sec;
-			ip->i_d.di_atime.t_nsec = iattr->ia_atime.tv_nsec;
-			ip->i_update_core = 1;
-		}
-		if (mask & ATTR_MTIME) {
-			inode->i_mtime = iattr->ia_mtime;
-			ip->i_d.di_mtime.t_sec = iattr->ia_mtime.tv_sec;
-			ip->i_d.di_mtime.t_nsec = iattr->ia_mtime.tv_nsec;
-			timeflags &= ~XFS_ICHGTIME_MOD;
-			timeflags |= XFS_ICHGTIME_CHG;
-		}
-		if (tp && (mask & (ATTR_MTIME_SET|ATTR_ATIME_SET)))
-			xfs_trans_log_inode (tp, ip, XFS_ILOG_CORE);
+	if (mask & ATTR_ATIME) {
+		inode->i_atime = iattr->ia_atime;
+		ip->i_d.di_atime.t_sec = iattr->ia_atime.tv_sec;
+		ip->i_d.di_atime.t_nsec = iattr->ia_atime.tv_nsec;
+		ip->i_update_core = 1;
 	}
-
-	/*
-	 * Change file inode change time only if ATTR_CTIME set
-	 * AND we have been called by a DMI function.
-	 */
-
-	if ((flags & XFS_ATTR_DMI) && (mask & ATTR_CTIME)) {
+	if (mask & ATTR_CTIME) {
 		inode->i_ctime = iattr->ia_ctime;
 		ip->i_d.di_ctime.t_sec = iattr->ia_ctime.tv_sec;
 		ip->i_d.di_ctime.t_nsec = iattr->ia_ctime.tv_nsec;
 		ip->i_update_core = 1;
-		timeflags &= ~XFS_ICHGTIME_CHG;
+	}
+	if (mask & ATTR_MTIME) {
+		inode->i_mtime = iattr->ia_mtime;
+		ip->i_d.di_mtime.t_sec = iattr->ia_mtime.tv_sec;
+		ip->i_d.di_mtime.t_nsec = iattr->ia_mtime.tv_nsec;
+		ip->i_update_core = 1;
 	}
 
 	/*
-	 * Send out timestamp changes that need to be set to the
-	 * current time.  Not done when called by a DMI function.
+	 * And finally, log the inode core if any attribute in it
+	 * has been changed.
 	 */
-	if (timeflags && !(flags & XFS_ATTR_DMI))
-		xfs_ichgtime(ip, timeflags);
+	if (mask & (ATTR_UID|ATTR_GID|ATTR_MODE|
+		    ATTR_ATIME|ATTR_CTIME|ATTR_MTIME))
+		xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
 
 	XFS_STATS_INC(xs_ig_attrchg);
 
@@ -451,12 +439,10 @@ xfs_setattr(
 	 * mix so this probably isn't worth the trouble to optimize.
 	 */
 	code = 0;
-	if (tp) {
-		if (mp->m_flags & XFS_MOUNT_WSYNC)
-			xfs_trans_set_sync(tp);
+	if (mp->m_flags & XFS_MOUNT_WSYNC)
+		xfs_trans_set_sync(tp);
 
-		code = xfs_trans_commit(tp, commit_flags);
-	}
+	code = xfs_trans_commit(tp, commit_flags);
 
 	xfs_iunlock(ip, lock_flags);
 
@@ -538,9 +524,8 @@ xfs_readlink_bmap(
 		d = XFS_FSB_TO_DADDR(mp, mval[n].br_startblock);
 		byte_cnt = XFS_FSB_TO_B(mp, mval[n].br_blockcount);
 
-		bp = xfs_buf_read_flags(mp->m_ddev_targp, d, BTOBB(byte_cnt),
-					XBF_LOCK | XBF_MAPPED |
-					XBF_DONT_BLOCK);
+		bp = xfs_buf_read(mp->m_ddev_targp, d, BTOBB(byte_cnt),
+				  XBF_LOCK | XBF_MAPPED | XBF_DONT_BLOCK);
 		error = XFS_BUF_GETERROR(bp);
 		if (error) {
 			xfs_ioerror_alert("xfs_readlink",
@@ -599,114 +584,9 @@ xfs_readlink(
 }
 
 /*
- * xfs_fsync
- *
- * This is called to sync the inode and its data out to disk. We need to hold
- * the I/O lock while flushing the data, and the inode lock while flushing the
- * inode. The inode lock CANNOT be held while flushing the data, so acquire
- * after we're done with that.
+ * Flags for xfs_free_eofblocks
  */
-int
-xfs_fsync(
-	xfs_inode_t	*ip)
-{
-	xfs_trans_t	*tp;
-	int		error = 0;
-	int		log_flushed = 0, changed = 1;
-
-	xfs_itrace_entry(ip);
-
-	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
-		return XFS_ERROR(EIO);
-
-	/*
-	 * We always need to make sure that the required inode state is safe on
-	 * disk. The inode might be clean but we still might need to force the
-	 * log because of committed transactions that haven't hit the disk yet.
-	 * Likewise, there could be unflushed non-transactional changes to the
-	 * inode core that have to go to disk and this requires us to issue
-	 * a synchronous transaction to capture these changes correctly.
-	 *
-	 * This code relies on the assumption that if the update_* fields
-	 * of the inode are clear and the inode is unpinned then it is clean
-	 * and no action is required.
-	 */
-	xfs_ilock(ip, XFS_ILOCK_SHARED);
-
-	if (!ip->i_update_core) {
-		/*
-		 * Timestamps/size haven't changed since last inode flush or
-		 * inode transaction commit. That means either nothing got
-		 * written or a transaction committed which caught the updates.
-		 * If the latter happened and the transaction hasn't hit the
-		 * disk yet, the inode will be still be pinned. If it is,
-		 * force the log.
-		 */
-
-		xfs_iunlock(ip, XFS_ILOCK_SHARED);
-
-		if (xfs_ipincount(ip)) {
-			error = _xfs_log_force(ip->i_mount, (xfs_lsn_t)0,
-				      XFS_LOG_FORCE | XFS_LOG_SYNC,
-				      &log_flushed);
-		} else {
-			/*
-			 * If the inode is not pinned and nothing has changed
-			 * we don't need to flush the cache.
-			 */
-			changed = 0;
-		}
-	} else {
-		/*
-		 * Kick off a transaction to log the inode core to get the
-		 * updates. The sync transaction will also force the log.
-		 */
-		xfs_iunlock(ip, XFS_ILOCK_SHARED);
-		tp = xfs_trans_alloc(ip->i_mount, XFS_TRANS_FSYNC_TS);
-		error = xfs_trans_reserve(tp, 0,
-				XFS_FSYNC_TS_LOG_RES(ip->i_mount), 0, 0, 0);
-		if (error) {
-			xfs_trans_cancel(tp, 0);
-			return error;
-		}
-		xfs_ilock(ip, XFS_ILOCK_EXCL);
-
-		/*
-		 * Note - it's possible that we might have pushed ourselves out
-		 * of the way during trans_reserve which would flush the inode.
-		 * But there's no guarantee that the inode buffer has actually
-		 * gone out yet (it's delwri). Plus the buffer could be pinned
-		 * anyway if it's part of an inode in another recent
-		 * transaction. So we play it safe and fire off the
-		 * transaction anyway.
-		 */
-		xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
-		xfs_trans_ihold(tp, ip);
-		xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
-		xfs_trans_set_sync(tp);
-		error = _xfs_trans_commit(tp, 0, &log_flushed);
-
-		xfs_iunlock(ip, XFS_ILOCK_EXCL);
-	}
-
-	if ((ip->i_mount->m_flags & XFS_MOUNT_BARRIER) && changed) {
-		/*
-		 * If the log write didn't issue an ordered tag we need
-		 * to flush the disk cache for the data device now.
-		 */
-		if (!log_flushed)
-			xfs_blkdev_issue_flush(ip->i_mount->m_ddev_targp);
-
-		/*
-		 * If this inode is on the RT dev we need to flush that
-		 * cache as well.
-		 */
-		if (XFS_IS_REALTIME_INODE(ip))
-			xfs_blkdev_issue_flush(ip->i_mount->m_rtdev_targp);
-	}
-
-	return error;
-}
+#define XFS_FREE_EOF_TRYLOCK	(1<<0)
 
 /*
  * This is called by xfs_inactive to free any blocks beyond eof
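
For readers tracing the xfs_fsync() removed above, its decision tree was: a clean-but-pinned inode only needs a synchronous log force, a dirty inode core needs a synchronous dummy transaction, and if neither path flushed the log the volatile disk write cache still has to be flushed by hand. The condensed user-space sketch below mirrors only that control flow; every type and helper in it is a hypothetical stand-in, not a kernel interface.

#include <stdbool.h>
#include <stdio.h>

struct demo_inode {
	bool	update_core;	/* unlogged changes to the inode core? */
	int	pin_count;	/* committed but not yet written log items */
};

/* stand-ins for the log force, the sync transaction and the cache flush */
static bool force_log_sync(void)          { puts("force log (sync)"); return true; }
static bool commit_sync_transaction(void) { puts("sync dummy transaction"); return true; }
static void flush_disk_write_cache(void)  { puts("flush disk write cache"); }

static void demo_fsync(struct demo_inode *ip, bool barriers)
{
	bool log_flushed = false;
	bool changed = true;

	if (!ip->update_core) {
		if (ip->pin_count)
			log_flushed = force_log_sync();
		else
			changed = false;	/* clean and unpinned: nothing to do */
	} else {
		log_flushed = commit_sync_transaction();
	}

	/* no ordered log write made it out: flush the write cache by hand */
	if (barriers && changed && !log_flushed)
		flush_disk_write_cache();
}

int main(void)
{
	struct demo_inode ip = { .update_core = false, .pin_count = 1 };

	demo_fsync(&ip, true);
	return 0;
}
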
@@ -726,7 +606,6 @@ xfs_free_eofblocks(
 	xfs_filblks_t	map_len;
 	int		nimaps;
 	xfs_bmbt_irec_t	imap;
-	int		use_iolock = (flags & XFS_FREE_EOF_LOCK);
 
 	/*
 	 * Figure out if there are any blocks beyond the end
@@ -768,14 +647,19 @@ xfs_free_eofblocks(
 		 * cache and we can't
 		 * do that within a transaction.
 		 */
-		if (use_iolock)
+		if (flags & XFS_FREE_EOF_TRYLOCK) {
+			if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
+				xfs_trans_cancel(tp, 0);
+				return 0;
+			}
+		} else {
 			xfs_ilock(ip, XFS_IOLOCK_EXCL);
+		}
 		error = xfs_itruncate_start(ip, XFS_ITRUNC_DEFINITE,
 				    ip->i_size);
 		if (error) {
 			xfs_trans_cancel(tp, 0);
-			if (use_iolock)
-				xfs_iunlock(ip, XFS_IOLOCK_EXCL);
+			xfs_iunlock(ip, XFS_IOLOCK_EXCL);
 			return error;
 		}
 
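
The new XFS_FREE_EOF_TRYLOCK path above is a trylock-or-skip pattern: if the iolock cannot be taken without blocking, the EOF-block trim is skipped instead of risking a lock-order deadlock, and it is retried when the last reference to the inode is dropped. A minimal sketch of the same pattern using POSIX mutexes; apart from the intent of the flag, all names below are illustrative stand-ins, not the kernel code.

#include <pthread.h>
#include <stdio.h>

#define DEMO_FREE_EOF_TRYLOCK	(1 << 0)	/* mirrors the intent of XFS_FREE_EOF_TRYLOCK */

static pthread_mutex_t demo_iolock = PTHREAD_MUTEX_INITIALIZER;

static int demo_free_eofblocks(int flags)
{
	if (flags & DEMO_FREE_EOF_TRYLOCK) {
		/* opportunistic caller: back out if the lock is contended */
		if (pthread_mutex_trylock(&demo_iolock) != 0) {
			puts("iolock busy, skipping EOF block trim");
			return 0;		/* not an error, just deferred */
		}
	} else {
		/* final teardown: the work must happen, so block for the lock */
		pthread_mutex_lock(&demo_iolock);
	}

	puts("trimming blocks past EOF");
	pthread_mutex_unlock(&demo_iolock);
	return 0;
}

int main(void)
{
	demo_free_eofblocks(DEMO_FREE_EOF_TRYLOCK);	/* release-style caller */
	demo_free_eofblocks(0);				/* inactive-style caller */
	return 0;
}
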
@@ -812,8 +696,7 @@ xfs_free_eofblocks(
 			error = xfs_trans_commit(tp,
 						XFS_TRANS_RELEASE_LOG_RES);
 		}
-		xfs_iunlock(ip, (use_iolock ? (XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL)
-				: XFS_ILOCK_EXCL));
+		xfs_iunlock(ip, XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL);
 	}
 	return error;
 }
@@ -1103,7 +986,7 @@ xfs_release(
 		 */
 		truncated = xfs_iflags_test_and_clear(ip, XFS_ITRUNCATED);
 		if (truncated && VN_DIRTY(VFS_I(ip)) && ip->i_delayed_blks > 0)
-			xfs_flush_pages(ip, 0, -1, XFS_B_ASYNC, FI_NONE);
+			xfs_flush_pages(ip, 0, -1, XBF_ASYNC, FI_NONE);
 	}
 
 	if (ip->i_d.di_nlink != 0) {
@@ -1113,7 +996,17 @@ xfs_release(
 		     (ip->i_df.if_flags & XFS_IFEXTENTS)) &&
 		    (!(ip->i_d.di_flags &
 				(XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND)))) {
-			error = xfs_free_eofblocks(mp, ip, XFS_FREE_EOF_LOCK);
+
+			/*
+			 * If we can't get the iolock just skip truncating
+			 * the blocks past EOF because we could deadlock
+			 * with the mmap_sem otherwise. We'll get another
+			 * chance to drop them once the last reference to
+			 * the inode is dropped, so we'll never leak blocks
+			 * permanently.
+			 */
+			error = xfs_free_eofblocks(mp, ip,
+						   XFS_FREE_EOF_TRYLOCK);
 			if (error)
 				return error;
 		}
@@ -1184,7 +1077,7 @@ xfs_inactive(
 		     (!(ip->i_d.di_flags &
 				(XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND)) ||
 		      (ip->i_delayed_blks != 0)))) {
-			error = xfs_free_eofblocks(mp, ip, XFS_FREE_EOF_LOCK);
+			error = xfs_free_eofblocks(mp, ip, 0);
 			if (error)
 				return VN_INACTIVE_CACHE;
 		}
@@ -1380,7 +1273,6 @@ xfs_lookup(
 	if (error)
 		goto out_free_name;
 
-	xfs_itrace_ref(*ipp);
 	return 0;
 
 out_free_name:
@@ -1526,7 +1418,6 @@ xfs_create(
 	 * At this point, we've gotten a newly allocated inode.
 	 * It is locked (and joined to the transaction).
 	 */
-	xfs_itrace_ref(ip);
 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
 
 	/*
@@ -1986,9 +1877,6 @@ xfs_remove(
 	if (!is_dir && link_zero && xfs_inode_is_filestream(ip))
 		xfs_filestream_deassociate(ip);
 
-	xfs_itrace_exit(ip);
-	xfs_itrace_exit(dp);
-
  std_return:
 	if (DM_EVENT_ENABLED(dp, DM_EVENT_POSTREMOVE)) {
 		XFS_SEND_NAMESP(mp, DM_EVENT_POSTREMOVE, dp, DM_RIGHT_NULL,
@@ -2201,7 +2089,8 @@ xfs_symlink(
 	if (DM_EVENT_ENABLED(dp, DM_EVENT_SYMLINK)) {
 		error = XFS_SEND_NAMESP(mp, DM_EVENT_SYMLINK, dp,
 					DM_RIGHT_NULL, NULL, DM_RIGHT_NULL,
-					link_name->name, target_path, 0, 0, 0);
+					link_name->name,
+					(unsigned char *)target_path, 0, 0, 0);
 		if (error)
 			return error;
 	}
@@ -2285,7 +2174,6 @@ xfs_symlink(
 			goto error_return;
 		goto error1;
 	}
-	xfs_itrace_ref(ip);
 
 	/*
 	 * An error after we've joined dp to the transaction will result in the
@@ -2398,7 +2286,8 @@ std_return:
 					dp, DM_RIGHT_NULL,
 					error ? NULL : ip,
 					DM_RIGHT_NULL, link_name->name,
-					target_path, 0, error, 0);
+					(unsigned char *)target_path,
+					0, error, 0);
 	}
 
 	if (!error)
@@ -2456,46 +2345,6 @@ xfs_set_dmattrs(
 	return error;
 }
 
-int
-xfs_reclaim(
-	xfs_inode_t	*ip)
-{
-
-	xfs_itrace_entry(ip);
-
-	ASSERT(!VN_MAPPED(VFS_I(ip)));
-
-	/* bad inode, get out here ASAP */
-	if (is_bad_inode(VFS_I(ip))) {
-		xfs_ireclaim(ip);
-		return 0;
-	}
-
-	xfs_ioend_wait(ip);
-
-	ASSERT(XFS_FORCED_SHUTDOWN(ip->i_mount) || ip->i_delayed_blks == 0);
-
-	/*
-	 * If we have nothing to flush with this inode then complete the
-	 * teardown now, otherwise break the link between the xfs inode and the
-	 * linux inode and clean up the xfs inode later. This avoids flushing
-	 * the inode to disk during the delete operation itself.
-	 *
-	 * When breaking the link, we need to set the XFS_IRECLAIMABLE flag
-	 * first to ensure that xfs_iunpin() will never see an xfs inode
-	 * that has a linux inode being reclaimed. Synchronisation is provided
-	 * by the i_flags_lock.
-	 */
-	if (!ip->i_update_core && (ip->i_itemp == NULL)) {
-		xfs_ilock(ip, XFS_ILOCK_EXCL);
-		xfs_iflock(ip);
-		xfs_iflags_set(ip, XFS_IRECLAIMABLE);
-		return xfs_reclaim_inode(ip, 1, XFS_IFLUSH_DELWRI_ELSE_SYNC);
-	}
-	xfs_inode_set_reclaim_tag(ip);
-	return 0;
-}
-
 /*
  * xfs_alloc_file_space()
  *	This routine allocates disk space for the given file.
@@ -2868,7 +2717,6 @@ xfs_free_file_space(
 	ioffset = offset & ~(rounding - 1);
 
 	if (VN_CACHED(VFS_I(ip)) != 0) {
-		xfs_inval_cached_trace(ip, ioffset, -1, ioffset, -1);
 		error = xfs_flushinval_pages(ip, ioffset, -1, FI_REMAPF_LOCKED);
 		if (error)
 			goto out_unlock_iolock;