diff options
author | Dave Chinner <dchinner@redhat.com> | 2010-12-16 01:08:41 -0500 |
---|---|---|
committer | Dave Chinner <david@fromorbit.com> | 2010-12-16 01:08:41 -0500 |
commit | 1a427ab0c1b205d1bda8da0b77ea9d295ac23c57 (patch) | |
tree | dc0fffd22282d0da29da43b3ebdeed7c3f5fac1d /fs/xfs/xfs_iget.c | |
parent | 1a3e8f3da09c7082d25b512a0ffe569391e4c09a (diff) |
xfs: convert pag_ici_lock to a spin lock
Now that we are using RCU protection for the inode cache lookups,
the lock is only needed on the modification side. Hence it is not
necessary for the lock to be an rwlock as there are no read side
holders anymore. Convert it to a spin lock to reflect its exclusive
nature.
Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Alex Elder <aelder@sgi.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Diffstat (limited to 'fs/xfs/xfs_iget.c')
-rw-r--r-- | fs/xfs/xfs_iget.c | 10 |
1 files changed, 5 insertions, 5 deletions
diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c index 04ed09b907b8..3ecad00e8409 100644 --- a/fs/xfs/xfs_iget.c +++ b/fs/xfs/xfs_iget.c | |||
@@ -260,7 +260,7 @@ xfs_iget_cache_hit( | |||
260 | goto out_error; | 260 | goto out_error; |
261 | } | 261 | } |
262 | 262 | ||
263 | write_lock(&pag->pag_ici_lock); | 263 | spin_lock(&pag->pag_ici_lock); |
264 | spin_lock(&ip->i_flags_lock); | 264 | spin_lock(&ip->i_flags_lock); |
265 | ip->i_flags &= ~(XFS_IRECLAIMABLE | XFS_IRECLAIM); | 265 | ip->i_flags &= ~(XFS_IRECLAIMABLE | XFS_IRECLAIM); |
266 | ip->i_flags |= XFS_INEW; | 266 | ip->i_flags |= XFS_INEW; |
@@ -273,7 +273,7 @@ xfs_iget_cache_hit( | |||
273 | &xfs_iolock_active, "xfs_iolock_active"); | 273 | &xfs_iolock_active, "xfs_iolock_active"); |
274 | 274 | ||
275 | spin_unlock(&ip->i_flags_lock); | 275 | spin_unlock(&ip->i_flags_lock); |
276 | write_unlock(&pag->pag_ici_lock); | 276 | spin_unlock(&pag->pag_ici_lock); |
277 | } else { | 277 | } else { |
278 | /* If the VFS inode is being torn down, pause and try again. */ | 278 | /* If the VFS inode is being torn down, pause and try again. */ |
279 | if (!igrab(inode)) { | 279 | if (!igrab(inode)) { |
@@ -351,7 +351,7 @@ xfs_iget_cache_miss( | |||
351 | BUG(); | 351 | BUG(); |
352 | } | 352 | } |
353 | 353 | ||
354 | write_lock(&pag->pag_ici_lock); | 354 | spin_lock(&pag->pag_ici_lock); |
355 | 355 | ||
356 | /* insert the new inode */ | 356 | /* insert the new inode */ |
357 | error = radix_tree_insert(&pag->pag_ici_root, agino, ip); | 357 | error = radix_tree_insert(&pag->pag_ici_root, agino, ip); |
@@ -366,14 +366,14 @@ xfs_iget_cache_miss( | |||
366 | ip->i_udquot = ip->i_gdquot = NULL; | 366 | ip->i_udquot = ip->i_gdquot = NULL; |
367 | xfs_iflags_set(ip, XFS_INEW); | 367 | xfs_iflags_set(ip, XFS_INEW); |
368 | 368 | ||
369 | write_unlock(&pag->pag_ici_lock); | 369 | spin_unlock(&pag->pag_ici_lock); |
370 | radix_tree_preload_end(); | 370 | radix_tree_preload_end(); |
371 | 371 | ||
372 | *ipp = ip; | 372 | *ipp = ip; |
373 | return 0; | 373 | return 0; |
374 | 374 | ||
375 | out_preload_end: | 375 | out_preload_end: |
376 | write_unlock(&pag->pag_ici_lock); | 376 | spin_unlock(&pag->pag_ici_lock); |
377 | radix_tree_preload_end(); | 377 | radix_tree_preload_end(); |
378 | if (lock_flags) | 378 | if (lock_flags) |
379 | xfs_iunlock(ip, lock_flags); | 379 | xfs_iunlock(ip, lock_flags); |