about summary refs log tree commit diff stats
path: root/fs
diff options
context:
space:
mode:
authorDavid Chinner <david@fromorbit.com>2008-08-13 02:41:16 -0400
committerLachlan McIlroy <lachlan@redback.melbourne.sgi.com>2008-08-13 02:41:16 -0400
commitc63942d3eeffb98219e05d0976862ab9907d297d (patch)
tree2f00c2aa9df30c3e91c06aa46c5722382093c5de /fs
parent39d2f1ab2a36ac527a6c41cfe689f50c239eaca3 (diff)
[XFS] replace inode flush semaphore with a completion
Use the new completion flush code to implement the inode flush lock. Removes one of the final users of semaphores in the XFS code base.

SGI-PV: 981498
SGI-Modid: xfs-linux-melb:xfs-kern:31817a
Signed-off-by: David Chinner <david@fromorbit.com>
Signed-off-by: Lachlan McIlroy <lachlan@sgi.com>
Diffstat (limited to 'fs')
-rw-r--r--fs/xfs/xfs_iget.c32
-rw-r--r--fs/xfs/xfs_inode.c11
-rw-r--r--fs/xfs/xfs_inode.h25
-rw-r--r--fs/xfs/xfs_inode_item.c11
4 files changed, 39 insertions, 40 deletions
diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c
index 539c2dd8eae8..e229e9e001c2 100644
--- a/fs/xfs/xfs_iget.c
+++ b/fs/xfs/xfs_iget.c
@@ -216,7 +216,14 @@ finish_inode:
216 mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino); 216 mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);
217 init_waitqueue_head(&ip->i_ipin_wait); 217 init_waitqueue_head(&ip->i_ipin_wait);
218 atomic_set(&ip->i_pincount, 0); 218 atomic_set(&ip->i_pincount, 0);
219 initnsema(&ip->i_flock, 1, "xfsfino"); 219
220 /*
221 * Because we want to use a counting completion, complete
222 * the flush completion once to allow a single access to
223 * the flush completion without blocking.
224 */
225 init_completion(&ip->i_flush);
226 complete(&ip->i_flush);
220 227
221 if (lock_flags) 228 if (lock_flags)
222 xfs_ilock(ip, lock_flags); 229 xfs_ilock(ip, lock_flags);
@@ -783,26 +790,3 @@ xfs_isilocked(
783} 790}
784#endif 791#endif
785 792
786/*
787 * The following three routines simply manage the i_flock
788 * semaphore embedded in the inode. This semaphore synchronizes
789 * processes attempting to flush the in-core inode back to disk.
790 */
791void
792xfs_iflock(xfs_inode_t *ip)
793{
794 psema(&(ip->i_flock), PINOD|PLTWAIT);
795}
796
797int
798xfs_iflock_nowait(xfs_inode_t *ip)
799{
800 return (cpsema(&(ip->i_flock)));
801}
802
803void
804xfs_ifunlock(xfs_inode_t *ip)
805{
806 ASSERT(issemalocked(&(ip->i_flock)));
807 vsema(&(ip->i_flock));
808}
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index b8444ee4dc95..aea62222b3a1 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -2626,7 +2626,6 @@ xfs_idestroy(
2626 xfs_idestroy_fork(ip, XFS_ATTR_FORK); 2626 xfs_idestroy_fork(ip, XFS_ATTR_FORK);
2627 mrfree(&ip->i_lock); 2627 mrfree(&ip->i_lock);
2628 mrfree(&ip->i_iolock); 2628 mrfree(&ip->i_iolock);
2629 freesema(&ip->i_flock);
2630 2629
2631#ifdef XFS_INODE_TRACE 2630#ifdef XFS_INODE_TRACE
2632 ktrace_free(ip->i_trace); 2631 ktrace_free(ip->i_trace);
@@ -3044,10 +3043,10 @@ cluster_corrupt_out:
3044/* 3043/*
3045 * xfs_iflush() will write a modified inode's changes out to the 3044 * xfs_iflush() will write a modified inode's changes out to the
3046 * inode's on disk home. The caller must have the inode lock held 3045 * inode's on disk home. The caller must have the inode lock held
3047 * in at least shared mode and the inode flush semaphore must be 3046 * in at least shared mode and the inode flush completion must be
3048 * held as well. The inode lock will still be held upon return from 3047 * active as well. The inode lock will still be held upon return from
3049 * the call and the caller is free to unlock it. 3048 * the call and the caller is free to unlock it.
3050 * The inode flush lock will be unlocked when the inode reaches the disk. 3049 * The inode flush will be completed when the inode reaches the disk.
3051 * The flags indicate how the inode's buffer should be written out. 3050 * The flags indicate how the inode's buffer should be written out.
3052 */ 3051 */
3053int 3052int
@@ -3066,7 +3065,7 @@ xfs_iflush(
3066 XFS_STATS_INC(xs_iflush_count); 3065 XFS_STATS_INC(xs_iflush_count);
3067 3066
3068 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)); 3067 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
3069 ASSERT(issemalocked(&(ip->i_flock))); 3068 ASSERT(!completion_done(&ip->i_flush));
3070 ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE || 3069 ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
3071 ip->i_d.di_nextents > ip->i_df.if_ext_max); 3070 ip->i_d.di_nextents > ip->i_df.if_ext_max);
3072 3071
@@ -3229,7 +3228,7 @@ xfs_iflush_int(
3229#endif 3228#endif
3230 3229
3231 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)); 3230 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
3232 ASSERT(issemalocked(&(ip->i_flock))); 3231 ASSERT(!completion_done(&ip->i_flush));
3233 ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE || 3232 ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
3234 ip->i_d.di_nextents > ip->i_df.if_ext_max); 3233 ip->i_d.di_nextents > ip->i_df.if_ext_max);
3235 3234
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index 571724404355..f771df6bfa6f 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -223,7 +223,7 @@ typedef struct xfs_inode {
223 struct xfs_inode_log_item *i_itemp; /* logging information */ 223 struct xfs_inode_log_item *i_itemp; /* logging information */
224 mrlock_t i_lock; /* inode lock */ 224 mrlock_t i_lock; /* inode lock */
225 mrlock_t i_iolock; /* inode IO lock */ 225 mrlock_t i_iolock; /* inode IO lock */
226 sema_t i_flock; /* inode flush lock */ 226 struct completion i_flush; /* inode flush completion q */
227 atomic_t i_pincount; /* inode pin count */ 227 atomic_t i_pincount; /* inode pin count */
228 wait_queue_head_t i_ipin_wait; /* inode pinning wait queue */ 228 wait_queue_head_t i_ipin_wait; /* inode pinning wait queue */
229 spinlock_t i_flags_lock; /* inode i_flags lock */ 229 spinlock_t i_flags_lock; /* inode i_flags lock */
@@ -482,11 +482,8 @@ int xfs_ilock_nowait(xfs_inode_t *, uint);
482void xfs_iunlock(xfs_inode_t *, uint); 482void xfs_iunlock(xfs_inode_t *, uint);
483void xfs_ilock_demote(xfs_inode_t *, uint); 483void xfs_ilock_demote(xfs_inode_t *, uint);
484int xfs_isilocked(xfs_inode_t *, uint); 484int xfs_isilocked(xfs_inode_t *, uint);
485void xfs_iflock(xfs_inode_t *);
486int xfs_iflock_nowait(xfs_inode_t *);
487uint xfs_ilock_map_shared(xfs_inode_t *); 485uint xfs_ilock_map_shared(xfs_inode_t *);
488void xfs_iunlock_map_shared(xfs_inode_t *, uint); 486void xfs_iunlock_map_shared(xfs_inode_t *, uint);
489void xfs_ifunlock(xfs_inode_t *);
490void xfs_ireclaim(xfs_inode_t *); 487void xfs_ireclaim(xfs_inode_t *);
491int xfs_finish_reclaim(xfs_inode_t *, int, int); 488int xfs_finish_reclaim(xfs_inode_t *, int, int);
492int xfs_finish_reclaim_all(struct xfs_mount *, int); 489int xfs_finish_reclaim_all(struct xfs_mount *, int);
@@ -580,6 +577,26 @@ extern struct kmem_zone *xfs_ifork_zone;
580extern struct kmem_zone *xfs_inode_zone; 577extern struct kmem_zone *xfs_inode_zone;
581extern struct kmem_zone *xfs_ili_zone; 578extern struct kmem_zone *xfs_ili_zone;
582 579
580/*
581 * Manage the i_flush queue embedded in the inode. This completion
582 * queue synchronizes processes attempting to flush the in-core
583 * inode back to disk.
584 */
585static inline void xfs_iflock(xfs_inode_t *ip)
586{
587 wait_for_completion(&ip->i_flush);
588}
589
590static inline int xfs_iflock_nowait(xfs_inode_t *ip)
591{
592 return try_wait_for_completion(&ip->i_flush);
593}
594
595static inline void xfs_ifunlock(xfs_inode_t *ip)
596{
597 complete(&ip->i_flush);
598}
599
583#endif /* __KERNEL__ */ 600#endif /* __KERNEL__ */
584 601
585#endif /* __XFS_INODE_H__ */ 602#endif /* __XFS_INODE_H__ */
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c
index 0eee08a32c26..97c7452e2620 100644
--- a/fs/xfs/xfs_inode_item.c
+++ b/fs/xfs/xfs_inode_item.c
@@ -779,11 +779,10 @@ xfs_inode_item_pushbuf(
779 ASSERT(iip->ili_push_owner == current_pid()); 779 ASSERT(iip->ili_push_owner == current_pid());
780 780
781 /* 781 /*
782 * If flushlock isn't locked anymore, chances are that the 782 * If a flush is not in progress anymore, chances are that the
783 * inode flush completed and the inode was taken off the AIL. 783 * inode was taken off the AIL. So, just get out.
784 * So, just get out.
785 */ 784 */
786 if (!issemalocked(&(ip->i_flock)) || 785 if (completion_done(&ip->i_flush) ||
787 ((iip->ili_item.li_flags & XFS_LI_IN_AIL) == 0)) { 786 ((iip->ili_item.li_flags & XFS_LI_IN_AIL) == 0)) {
788 iip->ili_pushbuf_flag = 0; 787 iip->ili_pushbuf_flag = 0;
789 xfs_iunlock(ip, XFS_ILOCK_SHARED); 788 xfs_iunlock(ip, XFS_ILOCK_SHARED);
@@ -805,7 +804,7 @@ xfs_inode_item_pushbuf(
805 * If not, we can flush it async. 804 * If not, we can flush it async.
806 */ 805 */
807 dopush = ((iip->ili_item.li_flags & XFS_LI_IN_AIL) && 806 dopush = ((iip->ili_item.li_flags & XFS_LI_IN_AIL) &&
808 issemalocked(&(ip->i_flock))); 807 !completion_done(&ip->i_flush));
809 iip->ili_pushbuf_flag = 0; 808 iip->ili_pushbuf_flag = 0;
810 xfs_iunlock(ip, XFS_ILOCK_SHARED); 809 xfs_iunlock(ip, XFS_ILOCK_SHARED);
811 xfs_buftrace("INODE ITEM PUSH", bp); 810 xfs_buftrace("INODE ITEM PUSH", bp);
@@ -858,7 +857,7 @@ xfs_inode_item_push(
858 ip = iip->ili_inode; 857 ip = iip->ili_inode;
859 858
860 ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED)); 859 ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED));
861 ASSERT(issemalocked(&(ip->i_flock))); 860 ASSERT(!completion_done(&ip->i_flush));
862 /* 861 /*
863 * Since we were able to lock the inode's flush lock and 862 * Since we were able to lock the inode's flush lock and
864 * we found it on the AIL, the inode must be dirty. This 863 * we found it on the AIL, the inode must be dirty. This