diff options
author | Christoph Hellwig <hch@infradead.org> | 2009-10-19 00:03:46 -0400 |
---|---|---|
committer | Alex Elder <aelder@sgi.com> | 2009-12-11 16:11:19 -0500 |
commit | c56c9631cbe88f08854a56ff9776c1f310916830 (patch) | |
tree | 9069de0b572857072bc99334b8d5e1bce14ef13f /fs | |
parent | 848ce8f731aed0a2d4ab5884a4f6664af73d2dd0 (diff) |
xfs: fix mmap_sem/iolock inversion in xfs_free_eofblocks
When xfs_free_eofblocks is called from ->release the VM might already
hold the mmap_sem, but in the write path we take the iolock before
taking the mmap_sem in the generic write code.
Switch xfs_free_eofblocks to only trylock the iolock if called from
->release and skip trimming the preallocated blocks in that case.
We'll still free them later on the final iput.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Alex Elder <aelder@sgi.com>
Signed-off-by: Alex Elder <aelder@sgi.com>
Diffstat (limited to 'fs')
-rw-r--r-- | fs/xfs/xfs_rw.h | 7 | ||||
-rw-r--r-- | fs/xfs/xfs_vnodeops.c | 34 |
2 files changed, 26 insertions, 15 deletions
diff --git a/fs/xfs/xfs_rw.h b/fs/xfs/xfs_rw.h index f5e4874c37d8..726014d1c925 100644 --- a/fs/xfs/xfs_rw.h +++ b/fs/xfs/xfs_rw.h | |||
@@ -37,13 +37,6 @@ xfs_fsb_to_db(struct xfs_inode *ip, xfs_fsblock_t fsb) | |||
37 | } | 37 | } |
38 | 38 | ||
39 | /* | 39 | /* |
40 | * Flags for xfs_free_eofblocks | ||
41 | */ | ||
42 | #define XFS_FREE_EOF_LOCK (1<<0) | ||
43 | #define XFS_FREE_EOF_NOLOCK (1<<1) | ||
44 | |||
45 | |||
46 | /* | ||
47 | * helper function to extract extent size hint from inode | 40 | * helper function to extract extent size hint from inode |
48 | */ | 41 | */ |
49 | STATIC_INLINE xfs_extlen_t | 42 | STATIC_INLINE xfs_extlen_t |
diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c index 3fac146b3b7d..d98401470cf0 100644 --- a/fs/xfs/xfs_vnodeops.c +++ b/fs/xfs/xfs_vnodeops.c | |||
@@ -709,6 +709,11 @@ xfs_fsync( | |||
709 | } | 709 | } |
710 | 710 | ||
711 | /* | 711 | /* |
712 | * Flags for xfs_free_eofblocks | ||
713 | */ | ||
714 | #define XFS_FREE_EOF_TRYLOCK (1<<0) | ||
715 | |||
716 | /* | ||
712 | * This is called by xfs_inactive to free any blocks beyond eof | 717 | * This is called by xfs_inactive to free any blocks beyond eof |
713 | * when the link count isn't zero and by xfs_dm_punch_hole() when | 718 | * when the link count isn't zero and by xfs_dm_punch_hole() when |
714 | * punching a hole to EOF. | 719 | * punching a hole to EOF. |
@@ -726,7 +731,6 @@ xfs_free_eofblocks( | |||
726 | xfs_filblks_t map_len; | 731 | xfs_filblks_t map_len; |
727 | int nimaps; | 732 | int nimaps; |
728 | xfs_bmbt_irec_t imap; | 733 | xfs_bmbt_irec_t imap; |
729 | int use_iolock = (flags & XFS_FREE_EOF_LOCK); | ||
730 | 734 | ||
731 | /* | 735 | /* |
732 | * Figure out if there are any blocks beyond the end | 736 | * Figure out if there are any blocks beyond the end |
@@ -768,14 +772,19 @@ xfs_free_eofblocks( | |||
768 | * cache and we can't | 772 | * cache and we can't |
769 | * do that within a transaction. | 773 | * do that within a transaction. |
770 | */ | 774 | */ |
771 | if (use_iolock) | 775 | if (flags & XFS_FREE_EOF_TRYLOCK) { |
776 | if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) { | ||
777 | xfs_trans_cancel(tp, 0); | ||
778 | return 0; | ||
779 | } | ||
780 | } else { | ||
772 | xfs_ilock(ip, XFS_IOLOCK_EXCL); | 781 | xfs_ilock(ip, XFS_IOLOCK_EXCL); |
782 | } | ||
773 | error = xfs_itruncate_start(ip, XFS_ITRUNC_DEFINITE, | 783 | error = xfs_itruncate_start(ip, XFS_ITRUNC_DEFINITE, |
774 | ip->i_size); | 784 | ip->i_size); |
775 | if (error) { | 785 | if (error) { |
776 | xfs_trans_cancel(tp, 0); | 786 | xfs_trans_cancel(tp, 0); |
777 | if (use_iolock) | 787 | xfs_iunlock(ip, XFS_IOLOCK_EXCL); |
778 | xfs_iunlock(ip, XFS_IOLOCK_EXCL); | ||
779 | return error; | 788 | return error; |
780 | } | 789 | } |
781 | 790 | ||
@@ -812,8 +821,7 @@ xfs_free_eofblocks( | |||
812 | error = xfs_trans_commit(tp, | 821 | error = xfs_trans_commit(tp, |
813 | XFS_TRANS_RELEASE_LOG_RES); | 822 | XFS_TRANS_RELEASE_LOG_RES); |
814 | } | 823 | } |
815 | xfs_iunlock(ip, (use_iolock ? (XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL) | 824 | xfs_iunlock(ip, XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL); |
816 | : XFS_ILOCK_EXCL)); | ||
817 | } | 825 | } |
818 | return error; | 826 | return error; |
819 | } | 827 | } |
@@ -1113,7 +1121,17 @@ xfs_release( | |||
1113 | (ip->i_df.if_flags & XFS_IFEXTENTS)) && | 1121 | (ip->i_df.if_flags & XFS_IFEXTENTS)) && |
1114 | (!(ip->i_d.di_flags & | 1122 | (!(ip->i_d.di_flags & |
1115 | (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND)))) { | 1123 | (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND)))) { |
1116 | error = xfs_free_eofblocks(mp, ip, XFS_FREE_EOF_LOCK); | 1124 | |
1125 | /* | ||
1126 | * If we can't get the iolock just skip truncating | ||
1127 | * the blocks past EOF because we could deadlock | ||
1128 | * with the mmap_sem otherwise. We'll get another | ||
1129 | * chance to drop them once the last reference to | ||
1130 | * the inode is dropped, so we'll never leak blocks | ||
1131 | * permanently. | ||
1132 | */ | ||
1133 | error = xfs_free_eofblocks(mp, ip, | ||
1134 | XFS_FREE_EOF_TRYLOCK); | ||
1117 | if (error) | 1135 | if (error) |
1118 | return error; | 1136 | return error; |
1119 | } | 1137 | } |
@@ -1184,7 +1202,7 @@ xfs_inactive( | |||
1184 | (!(ip->i_d.di_flags & | 1202 | (!(ip->i_d.di_flags & |
1185 | (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND)) || | 1203 | (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND)) || |
1186 | (ip->i_delayed_blks != 0)))) { | 1204 | (ip->i_delayed_blks != 0)))) { |
1187 | error = xfs_free_eofblocks(mp, ip, XFS_FREE_EOF_LOCK); | 1205 | error = xfs_free_eofblocks(mp, ip, 0); |
1188 | if (error) | 1206 | if (error) |
1189 | return VN_INACTIVE_CACHE; | 1207 | return VN_INACTIVE_CACHE; |
1190 | } | 1208 | } |