path: root/fs/xfs/xfs_inode.c
Diffstat (limited to 'fs/xfs/xfs_inode.c')
-rw-r--r--	fs/xfs/xfs_inode.c	278
1 file changed, 68 insertions, 210 deletions
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index b92a4fa2a0a1..0ffd56447045 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -47,10 +47,10 @@
 #include "xfs_rw.h"
 #include "xfs_error.h"
 #include "xfs_utils.h"
-#include "xfs_dir2_trace.h"
 #include "xfs_quota.h"
 #include "xfs_filestream.h"
 #include "xfs_vnodeops.h"
+#include "xfs_trace.h"
 
 kmem_zone_t *xfs_ifork_zone;
 kmem_zone_t *xfs_inode_zone;
@@ -151,7 +151,7 @@ xfs_imap_to_bp(
151 "an error %d on %s. Returning error.", 151 "an error %d on %s. Returning error.",
152 error, mp->m_fsname); 152 error, mp->m_fsname);
153 } else { 153 } else {
154 ASSERT(buf_flags & XFS_BUF_TRYLOCK); 154 ASSERT(buf_flags & XBF_TRYLOCK);
155 } 155 }
156 return error; 156 return error;
157 } 157 }
@@ -239,7 +239,7 @@ xfs_inotobp(
 	if (error)
 		return error;
 
-	error = xfs_imap_to_bp(mp, tp, &imap, &bp, XFS_BUF_LOCK, imap_flags);
+	error = xfs_imap_to_bp(mp, tp, &imap, &bp, XBF_LOCK, imap_flags);
 	if (error)
 		return error;
 
@@ -285,7 +285,7 @@ xfs_itobp(
 		return error;
 
 	if (!bp) {
-		ASSERT(buf_flags & XFS_BUF_TRYLOCK);
+		ASSERT(buf_flags & XBF_TRYLOCK);
 		ASSERT(tp == NULL);
 		*bpp = NULL;
 		return EAGAIN;
@@ -807,7 +807,7 @@ xfs_iread(
 	 * Get pointers to the on-disk inode and the buffer containing it.
 	 */
 	error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &bp,
-			       XFS_BUF_LOCK, iget_flags);
+			       XBF_LOCK, iget_flags);
 	if (error)
 		return error;
 	dip = (xfs_dinode_t *)xfs_buf_offset(bp, ip->i_imap.im_boffset);
@@ -1291,42 +1291,6 @@ xfs_file_last_byte(
 	return last_byte;
 }
 
-#if defined(XFS_RW_TRACE)
-STATIC void
-xfs_itrunc_trace(
-	int		tag,
-	xfs_inode_t	*ip,
-	int		flag,
-	xfs_fsize_t	new_size,
-	xfs_off_t	toss_start,
-	xfs_off_t	toss_finish)
-{
-	if (ip->i_rwtrace == NULL) {
-		return;
-	}
-
-	ktrace_enter(ip->i_rwtrace,
-		     (void*)((long)tag),
-		     (void*)ip,
-		     (void*)(unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff),
-		     (void*)(unsigned long)(ip->i_d.di_size & 0xffffffff),
-		     (void*)((long)flag),
-		     (void*)(unsigned long)((new_size >> 32) & 0xffffffff),
-		     (void*)(unsigned long)(new_size & 0xffffffff),
-		     (void*)(unsigned long)((toss_start >> 32) & 0xffffffff),
-		     (void*)(unsigned long)(toss_start & 0xffffffff),
-		     (void*)(unsigned long)((toss_finish >> 32) & 0xffffffff),
-		     (void*)(unsigned long)(toss_finish & 0xffffffff),
-		     (void*)(unsigned long)current_cpu(),
-		     (void*)(unsigned long)current_pid(),
-		     (void*)NULL,
-		     (void*)NULL,
-		     (void*)NULL);
-}
-#else
-#define xfs_itrunc_trace(tag, ip, flag, new_size, toss_start, toss_finish)
-#endif
-
 /*
  * Start the truncation of the file to new_size. The new size
  * must be smaller than the current size. This routine will
@@ -1409,8 +1373,7 @@ xfs_itruncate_start(
 		return 0;
 	}
 	last_byte = xfs_file_last_byte(ip);
-	xfs_itrunc_trace(XFS_ITRUNC_START, ip, flags, new_size, toss_start,
-			 last_byte);
+	trace_xfs_itruncate_start(ip, flags, new_size, toss_start, last_byte);
 	if (last_byte > toss_start) {
 		if (flags & XFS_ITRUNC_DEFINITE) {
 			xfs_tosspages(ip, toss_start,
@@ -1514,7 +1477,8 @@ xfs_itruncate_finish(
 		new_size = 0LL;
 	}
 	first_unmap_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size);
-	xfs_itrunc_trace(XFS_ITRUNC_FINISH1, ip, 0, new_size, 0, 0);
+	trace_xfs_itruncate_finish_start(ip, new_size);
+
 	/*
 	 * The first thing we do is set the size to new_size permanently
 	 * on disk. This way we don't have to worry about anyone ever
@@ -1731,7 +1695,7 @@ xfs_itruncate_finish(
 	ASSERT((new_size != 0) ||
 	       (fork == XFS_ATTR_FORK) ||
 	       (ip->i_d.di_nextents == 0));
-	xfs_itrunc_trace(XFS_ITRUNC_FINISH2, ip, 0, new_size, 0, 0);
+	trace_xfs_itruncate_finish_end(ip, new_size);
 	return 0;
 }
 
@@ -1787,7 +1751,7 @@ xfs_iunlink(
 		 * Here we put the head pointer into our next pointer,
 		 * and then we fall through to point the head at us.
 		 */
-		error = xfs_itobp(mp, tp, ip, &dip, &ibp, XFS_BUF_LOCK);
+		error = xfs_itobp(mp, tp, ip, &dip, &ibp, XBF_LOCK);
 		if (error)
 			return error;
 
@@ -1869,7 +1833,7 @@ xfs_iunlink_remove(
 		 * of dealing with the buffer when there is no need to
 		 * change it.
 		 */
-		error = xfs_itobp(mp, tp, ip, &dip, &ibp, XFS_BUF_LOCK);
+		error = xfs_itobp(mp, tp, ip, &dip, &ibp, XBF_LOCK);
 		if (error) {
 			cmn_err(CE_WARN,
 				"xfs_iunlink_remove: xfs_itobp() returned an error %d on %s. Returning error.",
@@ -1931,7 +1895,7 @@ xfs_iunlink_remove(
 		 * Now last_ibp points to the buffer previous to us on
 		 * the unlinked list. Pull us from the list.
 		 */
-		error = xfs_itobp(mp, tp, ip, &dip, &ibp, XFS_BUF_LOCK);
+		error = xfs_itobp(mp, tp, ip, &dip, &ibp, XBF_LOCK);
 		if (error) {
 			cmn_err(CE_WARN,
 				"xfs_iunlink_remove: xfs_itobp() returned an error %d on %s. Returning error.",
@@ -1982,8 +1946,9 @@ xfs_ifree_cluster(
 	xfs_inode_t		*ip, **ip_found;
 	xfs_inode_log_item_t	*iip;
 	xfs_log_item_t		*lip;
-	xfs_perag_t		*pag = xfs_get_perag(mp, inum);
+	struct xfs_perag	*pag;
 
+	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, inum));
 	if (mp->m_sb.sb_blocksize >= XFS_INODE_CLUSTER_SIZE(mp)) {
 		blks_per_cluster = 1;
 		ninodes = mp->m_sb.sb_inopblock;
@@ -2075,7 +2040,7 @@ xfs_ifree_cluster(
 
 		bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno,
 					mp->m_bsize * blks_per_cluster,
-					XFS_BUF_LOCK);
+					XBF_LOCK);
 
 		pre_flushed = 0;
 		lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *);
@@ -2124,7 +2089,7 @@ xfs_ifree_cluster(
 	}
 
 	kmem_free(ip_found);
-	xfs_put_perag(mp, pag);
+	xfs_perag_put(pag);
 }
 
 /*
@@ -2186,7 +2151,7 @@ xfs_ifree(
 
 	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
 
-	error = xfs_itobp(ip->i_mount, tp, ip, &dip, &ibp, XFS_BUF_LOCK);
+	error = xfs_itobp(ip->i_mount, tp, ip, &dip, &ibp, XBF_LOCK);
 	if (error)
 		return error;
 
@@ -2474,72 +2439,31 @@ xfs_idestroy_fork(
 }
 
 /*
- * Increment the pin count of the given buffer.
- * This value is protected by ipinlock spinlock in the mount structure.
- */
-void
-xfs_ipin(
-	xfs_inode_t	*ip)
-{
-	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
-
-	atomic_inc(&ip->i_pincount);
-}
-
-/*
- * Decrement the pin count of the given inode, and wake up
- * anyone in xfs_iwait_unpin() if the count goes to 0. The
- * inode must have been previously pinned with a call to xfs_ipin().
+ * This is called to unpin an inode. The caller must have the inode locked
+ * in at least shared mode so that the buffer cannot be subsequently pinned
+ * once someone is waiting for it to be unpinned.
  */
-void
-xfs_iunpin(
-	xfs_inode_t	*ip)
-{
-	ASSERT(atomic_read(&ip->i_pincount) > 0);
-
-	if (atomic_dec_and_test(&ip->i_pincount))
-		wake_up(&ip->i_ipin_wait);
-}
-
-/*
- * This is called to unpin an inode. It can be directed to wait or to return
- * immediately without waiting for the inode to be unpinned. The caller must
- * have the inode locked in at least shared mode so that the buffer cannot be
- * subsequently pinned once someone is waiting for it to be unpinned.
- */
-STATIC void
-__xfs_iunpin_wait(
-	xfs_inode_t	*ip,
-	int		wait)
+static void
+xfs_iunpin_nowait(
+	struct xfs_inode	*ip)
 {
-	xfs_inode_log_item_t	*iip = ip->i_itemp;
-
 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
-	if (atomic_read(&ip->i_pincount) == 0)
-		return;
 
 	/* Give the log a push to start the unpinning I/O */
-	xfs_log_force(ip->i_mount, (iip && iip->ili_last_lsn) ?
-			iip->ili_last_lsn : 0, XFS_LOG_FORCE);
-	if (wait)
-		wait_event(ip->i_ipin_wait, (atomic_read(&ip->i_pincount) == 0));
-}
+	xfs_log_force_lsn(ip->i_mount, ip->i_itemp->ili_last_lsn, 0);
 
-static inline void
-xfs_iunpin_wait(
-	xfs_inode_t	*ip)
-{
-	__xfs_iunpin_wait(ip, 1);
 }
 
-static inline void
-xfs_iunpin_nowait(
-	xfs_inode_t	*ip)
+void
+xfs_iunpin_wait(
+	struct xfs_inode	*ip)
 {
-	__xfs_iunpin_wait(ip, 0);
+	if (xfs_ipincount(ip)) {
+		xfs_iunpin_nowait(ip);
+		wait_event(ip->i_ipin_wait, (xfs_ipincount(ip) == 0));
+	}
 }
 
-
 /*
  * xfs_iextents_copy()
  *
@@ -2711,7 +2635,7 @@ xfs_iflush_cluster(
 	xfs_buf_t	*bp)
 {
 	xfs_mount_t		*mp = ip->i_mount;
-	xfs_perag_t		*pag = xfs_get_perag(mp, ip->i_ino);
+	struct xfs_perag	*pag;
 	unsigned long		first_index, mask;
 	unsigned long		inodes_per_cluster;
 	int			ilist_size;
@@ -2722,6 +2646,7 @@ xfs_iflush_cluster(
 	int			bufwasdelwri;
 	int			i;
 
+	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
 	ASSERT(pag->pagi_inodeok);
 	ASSERT(pag->pag_ici_init);
 
@@ -2729,7 +2654,7 @@ xfs_iflush_cluster(
 	ilist_size = inodes_per_cluster * sizeof(xfs_inode_t *);
 	ilist = kmem_alloc(ilist_size, KM_MAYFAIL|KM_NOFS);
 	if (!ilist)
-		return 0;
+		goto out_put;
 
 	mask = ~(((XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog)) - 1);
 	first_index = XFS_INO_TO_AGINO(mp, ip->i_ino) & mask;
@@ -2798,6 +2723,8 @@ xfs_iflush_cluster(
 out_free:
 	read_unlock(&pag->pag_ici_lock);
 	kmem_free(ilist);
+out_put:
+	xfs_perag_put(pag);
 	return 0;
 
 
@@ -2841,6 +2768,7 @@ cluster_corrupt_out:
 	 */
 	xfs_iflush_abort(iq);
 	kmem_free(ilist);
+	xfs_perag_put(pag);
 	return XFS_ERROR(EFSCORRUPTED);
 }
 
@@ -2863,8 +2791,6 @@ xfs_iflush(
 	xfs_dinode_t	*dip;
 	xfs_mount_t	*mp;
 	int		error;
-	int		noblock = (flags == XFS_IFLUSH_ASYNC_NOBLOCK);
-	enum { INT_DELWRI = (1 << 0), INT_ASYNC = (1 << 1) };
 
 	XFS_STATS_INC(xs_iflush_count);
 
@@ -2877,15 +2803,6 @@ xfs_iflush(
 	mp = ip->i_mount;
 
 	/*
-	 * If the inode isn't dirty, then just release the inode
-	 * flush lock and do nothing.
-	 */
-	if (xfs_inode_clean(ip)) {
-		xfs_ifunlock(ip);
-		return 0;
-	}
-
-	/*
 	 * We can't flush the inode until it is unpinned, so wait for it if we
 	 * are allowed to block. We know noone new can pin it, because we are
 	 * holding the inode lock shared and you need to hold it exclusively to
@@ -2896,7 +2813,7 @@ xfs_iflush(
 	 * in the same cluster are dirty, they will probably write the inode
 	 * out for us if they occur after the log force completes.
 	 */
-	if (noblock && xfs_ipincount(ip)) {
+	if (!(flags & SYNC_WAIT) && xfs_ipincount(ip)) {
 		xfs_iunpin_nowait(ip);
 		xfs_ifunlock(ip);
 		return EAGAIN;
@@ -2904,6 +2821,19 @@ xfs_iflush(
 	xfs_iunpin_wait(ip);
 
 	/*
+	 * For stale inodes we cannot rely on the backing buffer remaining
+	 * stale in cache for the remaining life of the stale inode and so
+	 * xfs_itobp() below may give us a buffer that no longer contains
+	 * inodes below. We have to check this after ensuring the inode is
+	 * unpinned so that it is safe to reclaim the stale inode after the
+	 * flush call.
+	 */
+	if (xfs_iflags_test(ip, XFS_ISTALE)) {
+		xfs_ifunlock(ip);
+		return 0;
+	}
+
+	/*
 	 * This may have been unpinned because the filesystem is shutting
 	 * down forcibly. If that's the case we must not write this inode
 	 * to disk, because the log record didn't make it to disk!
@@ -2917,60 +2847,10 @@ xfs_iflush(
 	}
 
 	/*
-	 * Decide how buffer will be flushed out. This is done before
-	 * the call to xfs_iflush_int because this field is zeroed by it.
-	 */
-	if (iip != NULL && iip->ili_format.ilf_fields != 0) {
-		/*
-		 * Flush out the inode buffer according to the directions
-		 * of the caller. In the cases where the caller has given
-		 * us a choice choose the non-delwri case. This is because
-		 * the inode is in the AIL and we need to get it out soon.
-		 */
-		switch (flags) {
-		case XFS_IFLUSH_SYNC:
-		case XFS_IFLUSH_DELWRI_ELSE_SYNC:
-			flags = 0;
-			break;
-		case XFS_IFLUSH_ASYNC_NOBLOCK:
-		case XFS_IFLUSH_ASYNC:
-		case XFS_IFLUSH_DELWRI_ELSE_ASYNC:
-			flags = INT_ASYNC;
-			break;
-		case XFS_IFLUSH_DELWRI:
-			flags = INT_DELWRI;
-			break;
-		default:
-			ASSERT(0);
-			flags = 0;
-			break;
-		}
-	} else {
-		switch (flags) {
-		case XFS_IFLUSH_DELWRI_ELSE_SYNC:
-		case XFS_IFLUSH_DELWRI_ELSE_ASYNC:
-		case XFS_IFLUSH_DELWRI:
-			flags = INT_DELWRI;
-			break;
-		case XFS_IFLUSH_ASYNC_NOBLOCK:
-		case XFS_IFLUSH_ASYNC:
-			flags = INT_ASYNC;
-			break;
-		case XFS_IFLUSH_SYNC:
-			flags = 0;
-			break;
-		default:
-			ASSERT(0);
-			flags = 0;
-			break;
-		}
-	}
-
-	/*
 	 * Get the buffer containing the on-disk inode.
 	 */
 	error = xfs_itobp(mp, NULL, ip, &dip, &bp,
-				noblock ? XFS_BUF_TRYLOCK : XFS_BUF_LOCK);
+				(flags & SYNC_WAIT) ? XBF_LOCK : XBF_TRYLOCK);
 	if (error || !bp) {
 		xfs_ifunlock(ip);
 		return error;
@@ -2988,7 +2868,7 @@ xfs_iflush(
 	 * get stuck waiting in the write for too long.
 	 */
 	if (XFS_BUF_ISPINNED(bp))
-		xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE);
+		xfs_log_force(mp, 0);
 
 	/*
 	 * inode clustering:
@@ -2998,13 +2878,10 @@ xfs_iflush(
 	if (error)
 		goto cluster_corrupt_out;
 
-	if (flags & INT_DELWRI) {
-		xfs_bdwrite(mp, bp);
-	} else if (flags & INT_ASYNC) {
-		error = xfs_bawrite(mp, bp);
-	} else {
+	if (flags & SYNC_WAIT)
 		error = xfs_bwrite(mp, bp);
-	}
+	else
+		xfs_bdwrite(mp, bp);
 	return error;
 
 corrupt_out:
@@ -3039,16 +2916,6 @@ xfs_iflush_int(
 	iip = ip->i_itemp;
 	mp = ip->i_mount;
 
-
-	/*
-	 * If the inode isn't dirty, then just release the inode
-	 * flush lock and do nothing.
-	 */
-	if (xfs_inode_clean(ip)) {
-		xfs_ifunlock(ip);
-		return 0;
-	}
-
 	/* set *dip = inode's place in the buffer */
 	dip = (xfs_dinode_t *)xfs_buf_offset(bp, ip->i_imap.im_boffset);
 
@@ -3252,23 +3119,6 @@ corrupt_out:
 	return XFS_ERROR(EFSCORRUPTED);
 }
 
-
-
-#ifdef XFS_ILOCK_TRACE
-void
-xfs_ilock_trace(xfs_inode_t *ip, int lock, unsigned int lockflags, inst_t *ra)
-{
-	ktrace_enter(ip->i_lock_trace,
-		     (void *)ip,
-		     (void *)(unsigned long)lock,	/* 1 = LOCK, 3=UNLOCK, etc */
-		     (void *)(unsigned long)lockflags,	/* XFS_ILOCK_EXCL etc */
-		     (void *)ra,			/* caller of ilock */
-		     (void *)(unsigned long)current_cpu(),
-		     (void *)(unsigned long)current_pid(),
-		     NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL);
-}
-#endif
-
 /*
  * Return a pointer to the extent record at file index idx.
  */
@@ -3300,13 +3150,17 @@ xfs_iext_get_ext(
  */
 void
 xfs_iext_insert(
-	xfs_ifork_t	*ifp,		/* inode fork pointer */
+	xfs_inode_t	*ip,		/* incore inode pointer */
 	xfs_extnum_t	idx,		/* starting index of new items */
 	xfs_extnum_t	count,		/* number of inserted items */
-	xfs_bmbt_irec_t	*new)		/* items to insert */
+	xfs_bmbt_irec_t	*new,		/* items to insert */
+	int		state)		/* type of extent conversion */
 {
+	xfs_ifork_t	*ifp = (state & BMAP_ATTRFORK) ? ip->i_afp : &ip->i_df;
 	xfs_extnum_t	i;		/* extent record index */
 
+	trace_xfs_iext_insert(ip, idx, new, state, _RET_IP_);
+
 	ASSERT(ifp->if_flags & XFS_IFEXTENTS);
 	xfs_iext_add(ifp, idx, count);
 	for (i = idx; i < idx + count; i++, new++)
@@ -3549,13 +3403,17 @@ xfs_iext_add_indirect_multi(
  */
 void
 xfs_iext_remove(
-	xfs_ifork_t	*ifp,		/* inode fork pointer */
+	xfs_inode_t	*ip,		/* incore inode pointer */
 	xfs_extnum_t	idx,		/* index to begin removing exts */
-	int		ext_diff)	/* number of extents to remove */
+	int		ext_diff,	/* number of extents to remove */
+	int		state)		/* type of extent conversion */
 {
+	xfs_ifork_t	*ifp = (state & BMAP_ATTRFORK) ? ip->i_afp : &ip->i_df;
 	xfs_extnum_t	nextents;	/* number of extents in file */
 	int		new_size;	/* size of extents after removal */
 
+	trace_xfs_iext_remove(ip, idx, state, _RET_IP_);
+
 	ASSERT(ext_diff > 0);
 	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
 	new_size = (nextents - ext_diff) * sizeof(xfs_bmbt_rec_t);