Diffstat (limited to 'fs/xfs/xfs_inode.c')
-rw-r--r--	fs/xfs/xfs_inode.c	186
1 file changed, 38 insertions(+), 148 deletions(-)
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index ef77fd88c8e3..0ffd56447045 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -151,7 +151,7 @@ xfs_imap_to_bp(
151 "an error %d on %s. Returning error.", 151 "an error %d on %s. Returning error.",
152 error, mp->m_fsname); 152 error, mp->m_fsname);
153 } else { 153 } else {
154 ASSERT(buf_flags & XFS_BUF_TRYLOCK); 154 ASSERT(buf_flags & XBF_TRYLOCK);
155 } 155 }
156 return error; 156 return error;
157 } 157 }
@@ -239,7 +239,7 @@ xfs_inotobp(
 	if (error)
 		return error;

-	error = xfs_imap_to_bp(mp, tp, &imap, &bp, XFS_BUF_LOCK, imap_flags);
+	error = xfs_imap_to_bp(mp, tp, &imap, &bp, XBF_LOCK, imap_flags);
 	if (error)
 		return error;

@@ -285,7 +285,7 @@ xfs_itobp(
 		return error;

 	if (!bp) {
-		ASSERT(buf_flags & XFS_BUF_TRYLOCK);
+		ASSERT(buf_flags & XBF_TRYLOCK);
 		ASSERT(tp == NULL);
 		*bpp = NULL;
 		return EAGAIN;
@@ -807,7 +807,7 @@ xfs_iread(
 	 * Get pointers to the on-disk inode and the buffer containing it.
 	 */
 	error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &bp,
-			       XFS_BUF_LOCK, iget_flags);
+			       XBF_LOCK, iget_flags);
 	if (error)
 		return error;
 	dip = (xfs_dinode_t *)xfs_buf_offset(bp, ip->i_imap.im_boffset);
@@ -1751,7 +1751,7 @@ xfs_iunlink(
 		 * Here we put the head pointer into our next pointer,
 		 * and then we fall through to point the head at us.
 		 */
-		error = xfs_itobp(mp, tp, ip, &dip, &ibp, XFS_BUF_LOCK);
+		error = xfs_itobp(mp, tp, ip, &dip, &ibp, XBF_LOCK);
 		if (error)
 			return error;

@@ -1833,7 +1833,7 @@ xfs_iunlink_remove(
 		 * of dealing with the buffer when there is no need to
 		 * change it.
 		 */
-		error = xfs_itobp(mp, tp, ip, &dip, &ibp, XFS_BUF_LOCK);
+		error = xfs_itobp(mp, tp, ip, &dip, &ibp, XBF_LOCK);
 		if (error) {
 			cmn_err(CE_WARN,
 				"xfs_iunlink_remove: xfs_itobp() returned an error %d on %s. Returning error.",
@@ -1895,7 +1895,7 @@ xfs_iunlink_remove(
 		 * Now last_ibp points to the buffer previous to us on
 		 * the unlinked list. Pull us from the list.
 		 */
-		error = xfs_itobp(mp, tp, ip, &dip, &ibp, XFS_BUF_LOCK);
+		error = xfs_itobp(mp, tp, ip, &dip, &ibp, XBF_LOCK);
 		if (error) {
 			cmn_err(CE_WARN,
 				"xfs_iunlink_remove: xfs_itobp() returned an error %d on %s. Returning error.",
@@ -1946,8 +1946,9 @@ xfs_ifree_cluster(
 	xfs_inode_t		*ip, **ip_found;
 	xfs_inode_log_item_t	*iip;
 	xfs_log_item_t		*lip;
-	xfs_perag_t		*pag = xfs_get_perag(mp, inum);
+	struct xfs_perag	*pag;

+	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, inum));
 	if (mp->m_sb.sb_blocksize >= XFS_INODE_CLUSTER_SIZE(mp)) {
 		blks_per_cluster = 1;
 		ninodes = mp->m_sb.sb_inopblock;
@@ -2039,7 +2040,7 @@ xfs_ifree_cluster(

 		bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno,
 					mp->m_bsize * blks_per_cluster,
-					XFS_BUF_LOCK);
+					XBF_LOCK);

 		pre_flushed = 0;
 		lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *);
@@ -2088,7 +2089,7 @@ xfs_ifree_cluster(
 	}

 	kmem_free(ip_found);
-	xfs_put_perag(mp, pag);
+	xfs_perag_put(pag);
 }

 /*
@@ -2150,7 +2151,7 @@ xfs_ifree(

 	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

-	error = xfs_itobp(ip->i_mount, tp, ip, &dip, &ibp, XFS_BUF_LOCK);
+	error = xfs_itobp(ip->i_mount, tp, ip, &dip, &ibp, XBF_LOCK);
 	if (error)
 		return error;

@@ -2438,72 +2439,31 @@ xfs_idestroy_fork(
 }

 /*
- * Increment the pin count of the given buffer.
- * This value is protected by ipinlock spinlock in the mount structure.
+ * This is called to unpin an inode. The caller must have the inode locked
+ * in at least shared mode so that the buffer cannot be subsequently pinned
+ * once someone is waiting for it to be unpinned.
  */
-void
-xfs_ipin(
-	xfs_inode_t	*ip)
-{
-	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
-
-	atomic_inc(&ip->i_pincount);
-}
-
-/*
- * Decrement the pin count of the given inode, and wake up
- * anyone in xfs_iwait_unpin() if the count goes to 0. The
- * inode must have been previously pinned with a call to xfs_ipin().
- */
-void
-xfs_iunpin(
-	xfs_inode_t	*ip)
-{
-	ASSERT(atomic_read(&ip->i_pincount) > 0);
-
-	if (atomic_dec_and_test(&ip->i_pincount))
-		wake_up(&ip->i_ipin_wait);
-}
-
-/*
- * This is called to unpin an inode. It can be directed to wait or to return
- * immediately without waiting for the inode to be unpinned. The caller must
- * have the inode locked in at least shared mode so that the buffer cannot be
- * subsequently pinned once someone is waiting for it to be unpinned.
- */
-STATIC void
-__xfs_iunpin_wait(
-	xfs_inode_t	*ip,
-	int		wait)
+static void
+xfs_iunpin_nowait(
+	struct xfs_inode	*ip)
 {
-	xfs_inode_log_item_t	*iip = ip->i_itemp;
-
 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
-	if (atomic_read(&ip->i_pincount) == 0)
-		return;

 	/* Give the log a push to start the unpinning I/O */
-	xfs_log_force(ip->i_mount, (iip && iip->ili_last_lsn) ?
-				iip->ili_last_lsn : 0, XFS_LOG_FORCE);
-	if (wait)
-		wait_event(ip->i_ipin_wait, (atomic_read(&ip->i_pincount) == 0));
-}
+	xfs_log_force_lsn(ip->i_mount, ip->i_itemp->ili_last_lsn, 0);

-static inline void
-xfs_iunpin_wait(
-	xfs_inode_t	*ip)
-{
-	__xfs_iunpin_wait(ip, 1);
 }

-static inline void
-xfs_iunpin_nowait(
-	xfs_inode_t	*ip)
+void
+xfs_iunpin_wait(
+	struct xfs_inode	*ip)
 {
-	__xfs_iunpin_wait(ip, 0);
+	if (xfs_ipincount(ip)) {
+		xfs_iunpin_nowait(ip);
+		wait_event(ip->i_ipin_wait, (xfs_ipincount(ip) == 0));
+	}
 }

-
 /*
  * xfs_iextents_copy()
  *
@@ -2675,7 +2635,7 @@ xfs_iflush_cluster(
 	xfs_buf_t	*bp)
 {
 	xfs_mount_t		*mp = ip->i_mount;
-	xfs_perag_t		*pag = xfs_get_perag(mp, ip->i_ino);
+	struct xfs_perag	*pag;
 	unsigned long		first_index, mask;
 	unsigned long		inodes_per_cluster;
 	int			ilist_size;
@@ -2686,6 +2646,7 @@ xfs_iflush_cluster(
 	int			bufwasdelwri;
 	int			i;

+	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
 	ASSERT(pag->pagi_inodeok);
 	ASSERT(pag->pag_ici_init);

@@ -2693,7 +2654,7 @@ xfs_iflush_cluster(
 	ilist_size = inodes_per_cluster * sizeof(xfs_inode_t *);
 	ilist = kmem_alloc(ilist_size, KM_MAYFAIL|KM_NOFS);
 	if (!ilist)
-		return 0;
+		goto out_put;

 	mask = ~(((XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog)) - 1);
 	first_index = XFS_INO_TO_AGINO(mp, ip->i_ino) & mask;
@@ -2762,6 +2723,8 @@ xfs_iflush_cluster(
 out_free:
 	read_unlock(&pag->pag_ici_lock);
 	kmem_free(ilist);
+out_put:
+	xfs_perag_put(pag);
 	return 0;


@@ -2805,6 +2768,7 @@ cluster_corrupt_out:
 	 */
 	xfs_iflush_abort(iq);
 	kmem_free(ilist);
+	xfs_perag_put(pag);
 	return XFS_ERROR(EFSCORRUPTED);
 }

@@ -2827,8 +2791,6 @@ xfs_iflush(
 	xfs_dinode_t		*dip;
 	xfs_mount_t		*mp;
 	int			error;
-	int			noblock = (flags == XFS_IFLUSH_ASYNC_NOBLOCK);
-	enum { INT_DELWRI = (1 << 0), INT_ASYNC = (1 << 1) };

 	XFS_STATS_INC(xs_iflush_count);

@@ -2841,15 +2803,6 @@ xfs_iflush(
 	mp = ip->i_mount;

 	/*
-	 * If the inode isn't dirty, then just release the inode flush lock and
-	 * do nothing.
-	 */
-	if (xfs_inode_clean(ip)) {
-		xfs_ifunlock(ip);
-		return 0;
-	}
-
-	/*
 	 * We can't flush the inode until it is unpinned, so wait for it if we
 	 * are allowed to block. We know noone new can pin it, because we are
 	 * holding the inode lock shared and you need to hold it exclusively to
@@ -2860,7 +2813,7 @@ xfs_iflush(
 	 * in the same cluster are dirty, they will probably write the inode
 	 * out for us if they occur after the log force completes.
 	 */
-	if (noblock && xfs_ipincount(ip)) {
+	if (!(flags & SYNC_WAIT) && xfs_ipincount(ip)) {
 		xfs_iunpin_nowait(ip);
 		xfs_ifunlock(ip);
 		return EAGAIN;
@@ -2894,60 +2847,10 @@ xfs_iflush(
 	}

 	/*
-	 * Decide how buffer will be flushed out. This is done before
-	 * the call to xfs_iflush_int because this field is zeroed by it.
-	 */
-	if (iip != NULL && iip->ili_format.ilf_fields != 0) {
-		/*
-		 * Flush out the inode buffer according to the directions
-		 * of the caller. In the cases where the caller has given
-		 * us a choice choose the non-delwri case. This is because
-		 * the inode is in the AIL and we need to get it out soon.
-		 */
-		switch (flags) {
-		case XFS_IFLUSH_SYNC:
-		case XFS_IFLUSH_DELWRI_ELSE_SYNC:
-			flags = 0;
-			break;
-		case XFS_IFLUSH_ASYNC_NOBLOCK:
-		case XFS_IFLUSH_ASYNC:
-		case XFS_IFLUSH_DELWRI_ELSE_ASYNC:
-			flags = INT_ASYNC;
-			break;
-		case XFS_IFLUSH_DELWRI:
-			flags = INT_DELWRI;
-			break;
-		default:
-			ASSERT(0);
-			flags = 0;
-			break;
-		}
-	} else {
-		switch (flags) {
-		case XFS_IFLUSH_DELWRI_ELSE_SYNC:
-		case XFS_IFLUSH_DELWRI_ELSE_ASYNC:
-		case XFS_IFLUSH_DELWRI:
-			flags = INT_DELWRI;
-			break;
-		case XFS_IFLUSH_ASYNC_NOBLOCK:
-		case XFS_IFLUSH_ASYNC:
-			flags = INT_ASYNC;
-			break;
-		case XFS_IFLUSH_SYNC:
-			flags = 0;
-			break;
-		default:
-			ASSERT(0);
-			flags = 0;
-			break;
-		}
-	}
-
-	/*
 	 * Get the buffer containing the on-disk inode.
 	 */
 	error = xfs_itobp(mp, NULL, ip, &dip, &bp,
-			  noblock ? XFS_BUF_TRYLOCK : XFS_BUF_LOCK);
+			  (flags & SYNC_WAIT) ? XBF_LOCK : XBF_TRYLOCK);
 	if (error || !bp) {
 		xfs_ifunlock(ip);
 		return error;
@@ -2965,7 +2868,7 @@ xfs_iflush(
 	 * get stuck waiting in the write for too long.
 	 */
 	if (XFS_BUF_ISPINNED(bp))
-		xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE);
+		xfs_log_force(mp, 0);

 	/*
 	 * inode clustering:
@@ -2975,13 +2878,10 @@ xfs_iflush(
 	if (error)
 		goto cluster_corrupt_out;

-	if (flags & INT_DELWRI) {
-		xfs_bdwrite(mp, bp);
-	} else if (flags & INT_ASYNC) {
-		error = xfs_bawrite(mp, bp);
-	} else {
+	if (flags & SYNC_WAIT)
 		error = xfs_bwrite(mp, bp);
-	}
+	else
+		xfs_bdwrite(mp, bp);
 	return error;

 corrupt_out:
@@ -3016,16 +2916,6 @@ xfs_iflush_int(
 	iip = ip->i_itemp;
 	mp = ip->i_mount;

-
-	/*
-	 * If the inode isn't dirty, then just release the inode
-	 * flush lock and do nothing.
-	 */
-	if (xfs_inode_clean(ip)) {
-		xfs_ifunlock(ip);
-		return 0;
-	}
-
 	/* set *dip = inode's place in the buffer */
 	dip = (xfs_dinode_t *)xfs_buf_offset(bp, ip->i_imap.im_boffset);
