diff options
author | Christoph Hellwig <hch@infradead.org> | 2011-12-18 15:00:09 -0500 |
---|---|---|
committer | Ben Myers <bpm@sgi.com> | 2012-01-17 16:06:45 -0500 |
commit | 474fce067521a40dbacc722e8ba119e81c2d31bf (patch) | |
tree | fd923aa42a5304182e8a8c64ca5d130f9afef286 /fs/xfs/xfs_iget.c | |
parent | 49e4c70e52a2bc2090e5a4e003e2888af21d6a2b (diff) |
xfs: replace i_flock with a sleeping bitlock
We almost never block on i_flock, the exception is synchronous inode
flushing. Instead of bloating the inode with a 16/24-byte completion
that we abuse as a semaphore just implement it as a bitlock that uses
a bit waitqueue for the rare sleeping path. This primarily is a
tradeoff between a much smaller inode and a faster non-blocking
path vs faster wakeups, and we are much better off with the former.
A small downside is that we will lose lockdep checking for i_flock, but
given that it's always taken inside the ilock that should be acceptable.
Note that for example the inode writeback locking is implemented in a
very similar way.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Alex Elder <aelder@sgi.com>
Signed-off-by: Ben Myers <bpm@sgi.com>
Diffstat (limited to 'fs/xfs/xfs_iget.c')
-rw-r--r-- | fs/xfs/xfs_iget.c | 20 |
1 file changed, 18 insertions, 2 deletions
diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c index f180ce896cd7..a7cf7139f9ad 100644 --- a/fs/xfs/xfs_iget.c +++ b/fs/xfs/xfs_iget.c | |||
@@ -77,7 +77,7 @@ xfs_inode_alloc( | |||
77 | 77 | ||
78 | ASSERT(atomic_read(&ip->i_pincount) == 0); | 78 | ASSERT(atomic_read(&ip->i_pincount) == 0); |
79 | ASSERT(!spin_is_locked(&ip->i_flags_lock)); | 79 | ASSERT(!spin_is_locked(&ip->i_flags_lock)); |
80 | ASSERT(completion_done(&ip->i_flush)); | 80 | ASSERT(!xfs_isiflocked(ip)); |
81 | ASSERT(ip->i_ino == 0); | 81 | ASSERT(ip->i_ino == 0); |
82 | 82 | ||
83 | mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino); | 83 | mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino); |
@@ -150,7 +150,7 @@ xfs_inode_free( | |||
150 | /* asserts to verify all state is correct here */ | 150 | /* asserts to verify all state is correct here */ |
151 | ASSERT(atomic_read(&ip->i_pincount) == 0); | 151 | ASSERT(atomic_read(&ip->i_pincount) == 0); |
152 | ASSERT(!spin_is_locked(&ip->i_flags_lock)); | 152 | ASSERT(!spin_is_locked(&ip->i_flags_lock)); |
153 | ASSERT(completion_done(&ip->i_flush)); | 153 | ASSERT(!xfs_isiflocked(ip)); |
154 | 154 | ||
155 | /* | 155 | /* |
156 | * Because we use RCU freeing we need to ensure the inode always | 156 | * Because we use RCU freeing we need to ensure the inode always |
@@ -713,3 +713,19 @@ xfs_isilocked( | |||
713 | return 0; | 713 | return 0; |
714 | } | 714 | } |
715 | #endif | 715 | #endif |
716 | |||
/*
 * Slow path of the inode flush lock: called when the non-blocking
 * trylock (xfs_iflock_nowait) has already failed and the caller must
 * sleep until the XFS_IFLOCK bit in ip->i_flags is released.
 *
 * Uses the generic bit-waitqueue machinery rather than a per-inode
 * completion/semaphore, keeping struct xfs_inode small at the cost of
 * a slightly slower wakeup on this rare contended path.
 */
void
__xfs_iflock(
	struct xfs_inode	*ip)
{
	/* Shared hashed waitqueue for this inode's flag bits. */
	wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_IFLOCK_BIT);
	DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_IFLOCK_BIT);

	do {
		/*
		 * Queue ourselves before testing the bit so a concurrent
		 * unlock's wakeup cannot be lost between the test and the
		 * sleep.  Exclusive wait: only one waiter is woken per
		 * release, since only one can take the lock anyway.
		 */
		prepare_to_wait_exclusive(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
		if (xfs_isiflocked(ip))
			io_schedule();	/* account the sleep as I/O wait */
	} while (!xfs_iflock_nowait(ip));	/* re-race for the bit on wakeup */

	finish_wait(wq, &wait.wait);
}