Diffstat (limited to 'fs')
-rw-r--r--  fs/xfs/linux-2.6/xfs_super.c  |   3
-rw-r--r--  fs/xfs/linux-2.6/xfs_vnode.h  |   5
-rw-r--r--  fs/xfs/xfs_inode.c            | 135
-rw-r--r--  fs/xfs/xfs_inode.h            |   3
-rw-r--r--  fs/xfs/xfs_itable.c           |   3
-rw-r--r--  fs/xfs/xfs_log_recover.c      |   3
-rw-r--r--  fs/xfs/xfs_trans_buf.c        |   3
-rw-r--r--  fs/xfs/xfs_vnodeops.c         |  55
8 files changed, 105 insertions(+), 105 deletions(-)
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
index 8831d9518790..cb9ce90d1deb 100644
--- a/fs/xfs/linux-2.6/xfs_super.c
+++ b/fs/xfs/linux-2.6/xfs_super.c
@@ -896,7 +896,8 @@ xfs_fs_write_inode(
 	struct inode		*inode,
 	int			sync)
 {
-	int			error = 0, flags = FLUSH_INODE;
+	int			error = 0;
+	int			flags = 0;
 
 	xfs_itrace_entry(XFS_I(inode));
 	if (sync) {
diff --git a/fs/xfs/linux-2.6/xfs_vnode.h b/fs/xfs/linux-2.6/xfs_vnode.h
index b5ea418693b1..f200e0244082 100644
--- a/fs/xfs/linux-2.6/xfs_vnode.h
+++ b/fs/xfs/linux-2.6/xfs_vnode.h
@@ -73,12 +73,9 @@ typedef enum bhv_vrwlock {
 #define IO_INVIS	0x00020	/* don't update inode timestamps */
 
 /*
- * Flags for vop_iflush call
+ * Flags for xfs_inode_flush
  */
 #define FLUSH_SYNC		1	/* wait for flush to complete */
-#define FLUSH_INODE		2	/* flush the inode itself */
-#define FLUSH_LOG		4	/* force the last log entry for
-					 * this inode out to disk */
 
 /*
  * Flush/Invalidate options for vop_toss/flush/flushinval_pages.
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index 6f156faf9d46..3c3e9e3c1da8 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -145,11 +145,16 @@ xfs_imap_to_bp(
 	xfs_buf_t	*bp;
 
 	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, imap->im_blkno,
-				   (int)imap->im_len, XFS_BUF_LOCK, &bp);
+				   (int)imap->im_len, buf_flags, &bp);
 	if (error) {
-		cmn_err(CE_WARN, "xfs_imap_to_bp: xfs_trans_read_buf()returned "
+		if (error != EAGAIN) {
+			cmn_err(CE_WARN,
+				"xfs_imap_to_bp: xfs_trans_read_buf()returned "
 				"an error %d on %s. Returning error.",
 				error, mp->m_fsname);
+		} else {
+			ASSERT(buf_flags & XFS_BUF_TRYLOCK);
+		}
 		return error;
 	}
 
@@ -274,7 +279,8 @@ xfs_itobp(
 	xfs_dinode_t	**dipp,
 	xfs_buf_t	**bpp,
 	xfs_daddr_t	bno,
-	uint		imap_flags)
+	uint		imap_flags,
+	uint		buf_flags)
 {
 	xfs_imap_t	imap;
 	xfs_buf_t	*bp;
@@ -305,10 +311,17 @@ xfs_itobp(
 	}
 	ASSERT(bno == 0 || bno == imap.im_blkno);
 
-	error = xfs_imap_to_bp(mp, tp, &imap, &bp, XFS_BUF_LOCK, imap_flags);
+	error = xfs_imap_to_bp(mp, tp, &imap, &bp, buf_flags, imap_flags);
 	if (error)
 		return error;
 
+	if (!bp) {
+		ASSERT(buf_flags & XFS_BUF_TRYLOCK);
+		ASSERT(tp == NULL);
+		*bpp = NULL;
+		return EAGAIN;
+	}
+
 	*dipp = (xfs_dinode_t *)xfs_buf_offset(bp, imap.im_boffset);
 	*bpp = bp;
 	return 0;
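
Note: the following is an illustrative user-space sketch, not XFS code. It mirrors the pattern the two hunks above thread through xfs_imap_to_bp()/xfs_itobp(): when the caller asks for a try-lock read, a busy buffer is reported back as EAGAIN (or a NULL buffer that the caller turns into EAGAIN) instead of sleeping on the buffer lock. All names below (read_buf, BUF_TRYLOCK, and so on) are invented for the example.

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

#define BUF_LOCK	0x1
#define BUF_TRYLOCK	0x2

static pthread_mutex_t buf_lock = PTHREAD_MUTEX_INITIALIZER;
static int buf_data = 42;

/* Returns 0 on success, EAGAIN if TRYLOCK was requested and the buffer is busy. */
static int read_buf(int flags, int *out)
{
	if (flags & BUF_TRYLOCK) {
		if (pthread_mutex_trylock(&buf_lock) != 0)
			return EAGAIN;	/* caller retries later */
	} else {
		pthread_mutex_lock(&buf_lock);	/* blocking path */
	}
	*out = buf_data;
	pthread_mutex_unlock(&buf_lock);
	return 0;
}

int main(void)
{
	int v, err = read_buf(BUF_TRYLOCK, &v);

	if (err == EAGAIN)
		printf("buffer busy, try again later\n");
	else
		printf("read %d\n", v);
	return 0;
}
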
@@ -812,7 +825,7 @@ xfs_iread(
 	 * return NULL as well. Set i_blkno to 0 so that xfs_itobp() will
 	 * know that this is a new incore inode.
 	 */
-	error = xfs_itobp(mp, tp, ip, &dip, &bp, bno, imap_flags);
+	error = xfs_itobp(mp, tp, ip, &dip, &bp, bno, imap_flags, XFS_BUF_LOCK);
 	if (error) {
 		kmem_zone_free(xfs_inode_zone, ip);
 		return error;
@@ -1901,7 +1914,7 @@ xfs_iunlink(
 		 * Here we put the head pointer into our next pointer,
 		 * and then we fall through to point the head at us.
 		 */
-		error = xfs_itobp(mp, tp, ip, &dip, &ibp, 0, 0);
+		error = xfs_itobp(mp, tp, ip, &dip, &ibp, 0, 0, XFS_BUF_LOCK);
 		if (error)
 			return error;
 
@@ -2009,7 +2022,7 @@ xfs_iunlink_remove(
 		 * of dealing with the buffer when there is no need to
 		 * change it.
 		 */
-		error = xfs_itobp(mp, tp, ip, &dip, &ibp, 0, 0);
+		error = xfs_itobp(mp, tp, ip, &dip, &ibp, 0, 0, XFS_BUF_LOCK);
 		if (error) {
 			cmn_err(CE_WARN,
 				"xfs_iunlink_remove: xfs_itobp() returned an error %d on %s. Returning error.",
@@ -2071,7 +2084,7 @@ xfs_iunlink_remove(
 		 * Now last_ibp points to the buffer previous to us on
 		 * the unlinked list. Pull us from the list.
 		 */
-		error = xfs_itobp(mp, tp, ip, &dip, &ibp, 0, 0);
+		error = xfs_itobp(mp, tp, ip, &dip, &ibp, 0, 0, XFS_BUF_LOCK);
 		if (error) {
 			cmn_err(CE_WARN,
 				"xfs_iunlink_remove: xfs_itobp() returned an error %d on %s. Returning error.",
@@ -2334,7 +2347,7 @@ xfs_ifree(
 
 	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
 
-	error = xfs_itobp(ip->i_mount, tp, ip, &dip, &ibp, 0, 0);
+	error = xfs_itobp(ip->i_mount, tp, ip, &dip, &ibp, 0, 0, XFS_BUF_LOCK);
 	if (error)
 		return error;
 
@@ -2777,38 +2790,41 @@ xfs_iunpin(
 }
 
 /*
- * This is called to wait for the given inode to be unpinned.
- * It will sleep until this happens.  The caller must have the
- * inode locked in at least shared mode so that the buffer cannot
- * be subsequently pinned once someone is waiting for it to be
- * unpinned.
+ * This is called to unpin an inode. It can be directed to wait or to return
+ * immediately without waiting for the inode to be unpinned. The caller must
+ * have the inode locked in at least shared mode so that the buffer cannot be
+ * subsequently pinned once someone is waiting for it to be unpinned.
  */
 STATIC void
-xfs_iunpin_wait(
-	xfs_inode_t	*ip)
+__xfs_iunpin_wait(
+	xfs_inode_t	*ip,
+	int		wait)
 {
-	xfs_inode_log_item_t	*iip;
-	xfs_lsn_t	lsn;
+	xfs_inode_log_item_t	*iip = ip->i_itemp;
 
 	ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE | MR_ACCESS));
-
-	if (atomic_read(&ip->i_pincount) == 0) {
+	if (atomic_read(&ip->i_pincount) == 0)
 		return;
-	}
 
-	iip = ip->i_itemp;
-	if (iip && iip->ili_last_lsn) {
-		lsn = iip->ili_last_lsn;
-	} else {
-		lsn = (xfs_lsn_t)0;
-	}
+	/* Give the log a push to start the unpinning I/O */
+	xfs_log_force(ip->i_mount, (iip && iip->ili_last_lsn) ?
+				iip->ili_last_lsn : 0, XFS_LOG_FORCE);
+	if (wait)
+		wait_event(ip->i_ipin_wait, (atomic_read(&ip->i_pincount) == 0));
+}
 
-	/*
-	 * Give the log a push so we don't wait here too long.
-	 */
-	xfs_log_force(ip->i_mount, lsn, XFS_LOG_FORCE);
+static inline void
+xfs_iunpin_wait(
+	xfs_inode_t	*ip)
+{
+	__xfs_iunpin_wait(ip, 1);
+}
 
-	wait_event(ip->i_ipin_wait, (atomic_read(&ip->i_pincount) == 0));
+static inline void
+xfs_iunpin_nowait(
+	xfs_inode_t	*ip)
+{
+	__xfs_iunpin_wait(ip, 0);
+}
 }
 
 
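Note: an illustrative sketch, not XFS code. It shows the shape the hunk above gives the unpin path: one worker taking a wait flag plus two thin inline wrappers, so a new non-blocking caller (xfs_iunpin_nowait) can start the unpinning I/O without sleeping. All names below are invented for the example.

#include <stdio.h>

static int pinned;			/* stand-in for ip->i_pincount */

static void start_unpin_io(void)
{
	/* stand-in for the log force that starts the unpinning I/O */
	pinned = 0;
}

static void __unpin_wait(int wait)
{
	if (!pinned)
		return;
	start_unpin_io();
	if (wait) {
		/* stand-in for wait_event(): block until the pin count drops */
		while (pinned)
			;
	}
}

static inline void unpin_wait(void)   { __unpin_wait(1); }
static inline void unpin_nowait(void) { __unpin_wait(0); }

int main(void)
{
	pinned = 1;
	unpin_nowait();		/* kick the I/O, do not wait */
	unpin_wait();		/* kick the I/O (if still pinned) and wait */
	printf("pinned = %d\n", pinned);
	return 0;
}
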
@@ -3003,6 +3019,7 @@ xfs_iflush(
 	int		bufwasdelwri;
 	struct hlist_node *entry;
 	enum { INT_DELWRI = (1 << 0), INT_ASYNC = (1 << 1) };
+	int		noblock = (flags == XFS_IFLUSH_ASYNC_NOBLOCK);
 
 	XFS_STATS_INC(xs_iflush_count);
 
@@ -3027,11 +3044,21 @@ xfs_iflush(
 	}
 
 	/*
-	 * We can't flush the inode until it is unpinned, so
-	 * wait for it. We know noone new can pin it, because
-	 * we are holding the inode lock shared and you need
-	 * to hold it exclusively to pin the inode.
+	 * We can't flush the inode until it is unpinned, so wait for it if we
+	 * are allowed to block. We know noone new can pin it, because we are
+	 * holding the inode lock shared and you need to hold it exclusively to
+	 * pin the inode.
+	 *
+	 * If we are not allowed to block, force the log out asynchronously so
+	 * that when we come back the inode will be unpinned. If other inodes
+	 * in the same cluster are dirty, they will probably write the inode
+	 * out for us if they occur after the log force completes.
 	 */
+	if (noblock && xfs_ipincount(ip)) {
+		xfs_iunpin_nowait(ip);
+		xfs_ifunlock(ip);
+		return EAGAIN;
+	}
 	xfs_iunpin_wait(ip);
 
 	/*
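
Note: an illustrative sketch, not XFS code. It condenses the logic the hunk above adds to xfs_iflush(): when blocking is not allowed and the inode is still pinned, start the asynchronous unpin, drop the flush lock, and hand EAGAIN back so the caller retries once the log force has completed. Names below are invented for the example.

#include <errno.h>
#include <stdio.h>

static int pin_count = 1;		/* stand-in for xfs_ipincount() */

static void unpin_nowait(void) { /* would force the log asynchronously */ }
static void unpin_wait(void)   { pin_count = 0; /* would also wait for it */ }
static void funlock(void)      { /* would drop the flush lock */ }

static int flush(int noblock)
{
	if (noblock && pin_count) {
		unpin_nowait();
		funlock();
		return EAGAIN;		/* retry after the log force finishes */
	}
	unpin_wait();
	/* ... write the inode back here ... */
	return 0;
}

int main(void)
{
	printf("non-blocking flush -> %d\n", flush(1));	/* EAGAIN */
	printf("blocking flush     -> %d\n", flush(0));	/* 0 */
	return 0;
}
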
@@ -3048,15 +3075,6 @@ xfs_iflush(
 	}
 
 	/*
-	 * Get the buffer containing the on-disk inode.
-	 */
-	error = xfs_itobp(mp, NULL, ip, &dip, &bp, 0, 0);
-	if (error) {
-		xfs_ifunlock(ip);
-		return error;
-	}
-
-	/*
 	 * Decide how buffer will be flushed out. This is done before
 	 * the call to xfs_iflush_int because this field is zeroed by it.
 	 */
@@ -3072,6 +3090,7 @@ xfs_iflush(
 	case XFS_IFLUSH_DELWRI_ELSE_SYNC:
 		flags = 0;
 		break;
+	case XFS_IFLUSH_ASYNC_NOBLOCK:
 	case XFS_IFLUSH_ASYNC:
 	case XFS_IFLUSH_DELWRI_ELSE_ASYNC:
 		flags = INT_ASYNC;
@@ -3091,6 +3110,7 @@ xfs_iflush(
 	case XFS_IFLUSH_DELWRI:
 		flags = INT_DELWRI;
 		break;
+	case XFS_IFLUSH_ASYNC_NOBLOCK:
 	case XFS_IFLUSH_ASYNC:
 		flags = INT_ASYNC;
 		break;
@@ -3105,6 +3125,16 @@ xfs_iflush(
 	}
 
 	/*
+	 * Get the buffer containing the on-disk inode.
+	 */
+	error = xfs_itobp(mp, NULL, ip, &dip, &bp, 0, 0,
+				noblock ? XFS_BUF_TRYLOCK : XFS_BUF_LOCK);
+	if (error || !bp) {
+		xfs_ifunlock(ip);
+		return error;
+	}
+
+	/*
 	 * First flush out the inode that xfs_iflush was called with.
 	 */
 	error = xfs_iflush_int(ip, bp);
@@ -3113,6 +3143,13 @@ xfs_iflush(
 	}
 
 	/*
+	 * If the buffer is pinned then push on the log now so we won't
+	 * get stuck waiting in the write for too long.
+	 */
+	if (XFS_BUF_ISPINNED(bp))
+		xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE);
+
+	/*
 	 * inode clustering:
 	 * see if other inodes can be gathered into this write
 	 */
@@ -3181,14 +3218,6 @@ xfs_iflush(
 		XFS_STATS_ADD(xs_icluster_flushinode, clcount);
 	}
 
-	/*
-	 * If the buffer is pinned then push on the log so we won't
-	 * get stuck waiting in the write for too long.
-	 */
-	if (XFS_BUF_ISPINNED(bp)){
-		xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE);
-	}
-
 	if (flags & INT_DELWRI) {
 		xfs_bdwrite(mp, bp);
 	} else if (flags & INT_ASYNC) {
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index eaa01895ff93..c3bfffca9214 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -457,6 +457,7 @@ xfs_iflags_test_and_clear(xfs_inode_t *ip, unsigned short flags)
 #define	XFS_IFLUSH_SYNC			3
 #define	XFS_IFLUSH_ASYNC		4
 #define	XFS_IFLUSH_DELWRI		5
+#define	XFS_IFLUSH_ASYNC_NOBLOCK	6
 
 /*
  * Flags for xfs_itruncate_start().
@@ -511,7 +512,7 @@ int xfs_finish_reclaim_all(struct xfs_mount *, int);
  */
 int		xfs_itobp(struct xfs_mount *, struct xfs_trans *,
 			  xfs_inode_t *, struct xfs_dinode **, struct xfs_buf **,
-			  xfs_daddr_t, uint);
+			  xfs_daddr_t, uint, uint);
 int		xfs_iread(struct xfs_mount *, struct xfs_trans *, xfs_ino_t,
 			  xfs_inode_t **, xfs_daddr_t, uint);
 int		xfs_iread_extents(struct xfs_trans *, xfs_inode_t *, int);
diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c
index f615e04364f4..45d8776408ef 100644
--- a/fs/xfs/xfs_itable.c
+++ b/fs/xfs/xfs_itable.c
@@ -614,7 +614,8 @@ xfs_bulkstat(
 					xfs_buf_relse(bp);
 				error = xfs_itobp(mp, NULL, ip,
 						&dip, &bp, bno,
-						XFS_IMAP_BULKSTAT);
+						XFS_IMAP_BULKSTAT,
+						XFS_BUF_LOCK);
 				if (!error)
 					clustidx = ip->i_boffset / mp->m_sb.sb_inodesize;
 				kmem_zone_free(xfs_inode_zone, ip);
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index b2b70eba282c..cd24711ae276 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -3214,7 +3214,8 @@ xlog_recover_process_iunlinks(
 				 * next inode in the bucket.
 				 */
 				error = xfs_itobp(mp, NULL, ip, &dip,
-						&ibp, 0, 0);
+						&ibp, 0, 0,
+						XFS_BUF_LOCK);
 				ASSERT(error || (dip != NULL));
 			}
 
diff --git a/fs/xfs/xfs_trans_buf.c b/fs/xfs/xfs_trans_buf.c
index 60b6b898022b..4e5c010f5040 100644
--- a/fs/xfs/xfs_trans_buf.c
+++ b/fs/xfs/xfs_trans_buf.c
@@ -304,7 +304,8 @@ xfs_trans_read_buf(
 	if (tp == NULL) {
 		bp = xfs_buf_read_flags(target, blkno, len, flags | BUF_BUSY);
 		if (!bp)
-			return XFS_ERROR(ENOMEM);
+			return (flags & XFS_BUF_TRYLOCK) ?
+					EAGAIN : XFS_ERROR(ENOMEM);
 
 		if ((bp != NULL) && (XFS_BUF_GETERROR(bp) != 0)) {
 			xfs_ioerror_alert("xfs_trans_read_buf", mp,
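
Note: an illustrative sketch, not XFS code. It shows why the hunk above distinguishes a failed try-lock (EAGAIN, worth retrying) from a real allocation failure (ENOMEM, a hard error) in the no-transaction path of xfs_trans_read_buf(). Names below are invented for the example.

#include <errno.h>
#include <stdio.h>

/* Pretend the buffer is busy on the first attempt, then available. */
static int try_read_buf(int attempt)
{
	return attempt == 0 ? EAGAIN : 0;
}

int main(void)
{
	for (int attempt = 0; attempt < 3; attempt++) {
		int err = try_read_buf(attempt);

		if (!err) {
			printf("read ok on attempt %d\n", attempt);
			break;
		}
		if (err != EAGAIN) {
			printf("hard error %d, giving up\n", err);
			break;
		}
		/* EAGAIN: the buffer was merely locked, try again later */
	}
	return 0;
}
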
diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c
index 40b95e3a88ba..14140f6225ba 100644
--- a/fs/xfs/xfs_vnodeops.c
+++ b/fs/xfs/xfs_vnodeops.c
@@ -3468,29 +3468,6 @@ xfs_inode_flush(
 	    ((iip == NULL) || !(iip->ili_format.ilf_fields & XFS_ILOG_ALL)))
 		return 0;
 
-	if (flags & FLUSH_LOG) {
-		if (iip && iip->ili_last_lsn) {
-			xlog_t		*log = mp->m_log;
-			xfs_lsn_t	sync_lsn;
-			int		log_flags = XFS_LOG_FORCE;
-
-			spin_lock(&log->l_grant_lock);
-			sync_lsn = log->l_last_sync_lsn;
-			spin_unlock(&log->l_grant_lock);
-
-			if ((XFS_LSN_CMP(iip->ili_last_lsn, sync_lsn) > 0)) {
-				if (flags & FLUSH_SYNC)
-					log_flags |= XFS_LOG_SYNC;
-				error = xfs_log_force(mp, iip->ili_last_lsn, log_flags);
-				if (error)
-					return error;
-			}
-
-			if (ip->i_update_core == 0)
-				return 0;
-		}
-	}
-
 	/*
 	 * We make this non-blocking if the inode is contended,
 	 * return EAGAIN to indicate to the caller that they
@@ -3498,30 +3475,22 @@ xfs_inode_flush(
 	 * blocking on inodes inside another operation right
 	 * now, they get caught later by xfs_sync.
 	 */
-	if (flags & FLUSH_INODE) {
-		int	flush_flags;
-
-		if (flags & FLUSH_SYNC) {
-			xfs_ilock(ip, XFS_ILOCK_SHARED);
-			xfs_iflock(ip);
-		} else if (xfs_ilock_nowait(ip, XFS_ILOCK_SHARED)) {
-			if (xfs_ipincount(ip) || !xfs_iflock_nowait(ip)) {
-				xfs_iunlock(ip, XFS_ILOCK_SHARED);
-				return EAGAIN;
-			}
-		} else {
+	if (flags & FLUSH_SYNC) {
+		xfs_ilock(ip, XFS_ILOCK_SHARED);
+		xfs_iflock(ip);
+	} else if (xfs_ilock_nowait(ip, XFS_ILOCK_SHARED)) {
+		if (xfs_ipincount(ip) || !xfs_iflock_nowait(ip)) {
+			xfs_iunlock(ip, XFS_ILOCK_SHARED);
 			return EAGAIN;
 		}
-
-		if (flags & FLUSH_SYNC)
-			flush_flags = XFS_IFLUSH_SYNC;
-		else
-			flush_flags = XFS_IFLUSH_ASYNC;
-
-		error = xfs_iflush(ip, flush_flags);
-		xfs_iunlock(ip, XFS_ILOCK_SHARED);
+	} else {
+		return EAGAIN;
 	}
 
+	error = xfs_iflush(ip, (flags & FLUSH_SYNC) ? XFS_IFLUSH_SYNC
+					: XFS_IFLUSH_ASYNC_NOBLOCK);
+	xfs_iunlock(ip, XFS_ILOCK_SHARED);
+
 	return error;
 }
 
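
Note: an illustrative user-space sketch, not XFS code. It mirrors the locking shape xfs_inode_flush() ends up with after the two hunks above: a synchronous caller blocks on both the inode lock and the flush lock, while an asynchronous caller try-locks and returns EAGAIN on any contention (or if the inode is pinned), leaving the inode to be picked up by a later xfs_sync pass. The pthread mutexes and names stand in for the XFS locks and are invented for the example.

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t ilock = PTHREAD_MUTEX_INITIALIZER;	/* ~ XFS_ILOCK_SHARED */
static pthread_mutex_t flushlock = PTHREAD_MUTEX_INITIALIZER;	/* ~ inode flush lock */
static int pin_count;						/* ~ xfs_ipincount() */

static int inode_flush(int sync)
{
	if (sync) {
		pthread_mutex_lock(&ilock);
		pthread_mutex_lock(&flushlock);
	} else if (pthread_mutex_trylock(&ilock) == 0) {
		if (pin_count || pthread_mutex_trylock(&flushlock) != 0) {
			pthread_mutex_unlock(&ilock);
			return EAGAIN;	/* contended; a later sync catches it */
		}
	} else {
		return EAGAIN;
	}

	/* ... the actual flush would run here ... */
	pthread_mutex_unlock(&flushlock);
	pthread_mutex_unlock(&ilock);
	return 0;
}

int main(void)
{
	printf("async: %d, sync: %d\n", inode_flush(0), inode_flush(1));
	return 0;
}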