Diffstat (limited to 'fs/xfs/xfs_inode.c')
 -rw-r--r--  fs/xfs/xfs_inode.c  85
 1 file changed, 37 insertions(+), 48 deletions(-)
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index 001aa893ed59..3a137e9f9a7d 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -77,48 +77,44 @@ xfs_get_extsz_hint(
 }
 
 /*
- * This is a wrapper routine around the xfs_ilock() routine used to centralize
- * some grungy code.  It is used in places that wish to lock the inode solely
- * for reading the extents.  The reason these places can't just call
- * xfs_ilock(SHARED) is that the inode lock also guards to bringing in of the
- * extents from disk for a file in b-tree format.  If the inode is in b-tree
- * format, then we need to lock the inode exclusively until the extents are read
- * in.  Locking it exclusively all the time would limit our parallelism
- * unnecessarily, though.  What we do instead is check to see if the extents
- * have been read in yet, and only lock the inode exclusively if they have not.
+ * These two are wrapper routines around the xfs_ilock() routine used to
+ * centralize some grungy code.  They are used in places that wish to lock the
+ * inode solely for reading the extents.  The reason these places can't just
+ * call xfs_ilock(ip, XFS_ILOCK_SHARED) is that the inode lock also guards the
+ * bringing in of the extents from disk for a file in b-tree format.  If the
+ * inode is in b-tree format, then we need to lock the inode exclusively until
+ * the extents are read in.  Locking it exclusively all the time would limit
+ * our parallelism unnecessarily, though.  What we do instead is check to see
+ * if the extents have been read in yet, and only lock the inode exclusively
+ * if they have not.
  *
- * The function returns a value which should be given to the corresponding
- * xfs_iunlock_map_shared().  This value is the mode in which the lock was
- * actually taken.
+ * The functions return a value which should be given to the corresponding
+ * xfs_iunlock() call.
  */
 uint
-xfs_ilock_map_shared(
-	xfs_inode_t	*ip)
+xfs_ilock_data_map_shared(
+	struct xfs_inode	*ip)
 {
-	uint	lock_mode;
+	uint			lock_mode = XFS_ILOCK_SHARED;
 
-	if ((ip->i_d.di_format == XFS_DINODE_FMT_BTREE) &&
-	    ((ip->i_df.if_flags & XFS_IFEXTENTS) == 0)) {
+	if (ip->i_d.di_format == XFS_DINODE_FMT_BTREE &&
+	    (ip->i_df.if_flags & XFS_IFEXTENTS) == 0)
 		lock_mode = XFS_ILOCK_EXCL;
-	} else {
-		lock_mode = XFS_ILOCK_SHARED;
-	}
-
 	xfs_ilock(ip, lock_mode);
-
 	return lock_mode;
 }
 
-/*
- * This is simply the unlock routine to go with xfs_ilock_map_shared().
- * All it does is call xfs_iunlock() with the given lock_mode.
- */
-void
-xfs_iunlock_map_shared(
-	xfs_inode_t	*ip,
-	unsigned int	lock_mode)
+uint
+xfs_ilock_attr_map_shared(
+	struct xfs_inode	*ip)
 {
-	xfs_iunlock(ip, lock_mode);
+	uint			lock_mode = XFS_ILOCK_SHARED;
+
+	if (ip->i_d.di_aformat == XFS_DINODE_FMT_BTREE &&
+	    (ip->i_afp->if_flags & XFS_IFEXTENTS) == 0)
+		lock_mode = XFS_ILOCK_EXCL;
+	xfs_ilock(ip, lock_mode);
+	return lock_mode;
 }
 
 /*
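
Note: per the rewritten comment, the return value must be handed to the
matching xfs_iunlock() call. A hedged usage sketch of the new attr-fork
helper (not part of this patch; xfs_attr_reader() is a made-up
placeholder for whatever read-only extent walk the caller does):

	uint	lock_mode;

	/* SHARED unless the attr extents still have to be read in. */
	lock_mode = xfs_ilock_attr_map_shared(ip);
	error = xfs_attr_reader(ip);	/* hypothetical read-only work */
	xfs_iunlock(ip, lock_mode);	/* drop whichever mode was taken */

The data-fork variant is used the same way, as the xfs_lookup() hunk
just below shows.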
@@ -588,9 +584,9 @@ xfs_lookup(
 	if (XFS_FORCED_SHUTDOWN(dp->i_mount))
 		return XFS_ERROR(EIO);
 
-	lock_mode = xfs_ilock_map_shared(dp);
+	lock_mode = xfs_ilock_data_map_shared(dp);
 	error = xfs_dir_lookup(NULL, dp, name, &inum, ci_name);
-	xfs_iunlock_map_shared(dp, lock_mode);
+	xfs_iunlock(dp, lock_mode);
 
 	if (error)
 		goto out;
@@ -2141,8 +2137,8 @@ xfs_ifree_cluster(
 {
 	xfs_mount_t		*mp = free_ip->i_mount;
 	int			blks_per_cluster;
+	int			inodes_per_cluster;
 	int			nbufs;
-	int			ninodes;
 	int			i, j;
 	xfs_daddr_t		blkno;
 	xfs_buf_t		*bp;
@@ -2152,18 +2148,11 @@ xfs_ifree_cluster(
 	struct xfs_perag	*pag;
 
 	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, inum));
-	if (mp->m_sb.sb_blocksize >= XFS_INODE_CLUSTER_SIZE(mp)) {
-		blks_per_cluster = 1;
-		ninodes = mp->m_sb.sb_inopblock;
-		nbufs = XFS_IALLOC_BLOCKS(mp);
-	} else {
-		blks_per_cluster = XFS_INODE_CLUSTER_SIZE(mp) /
-					mp->m_sb.sb_blocksize;
-		ninodes = blks_per_cluster * mp->m_sb.sb_inopblock;
-		nbufs = XFS_IALLOC_BLOCKS(mp) / blks_per_cluster;
-	}
+	blks_per_cluster = xfs_icluster_size_fsb(mp);
+	inodes_per_cluster = blks_per_cluster << mp->m_sb.sb_inopblog;
+	nbufs = mp->m_ialloc_blks / blks_per_cluster;
 
-	for (j = 0; j < nbufs; j++, inum += ninodes) {
+	for (j = 0; j < nbufs; j++, inum += inodes_per_cluster) {
 		blkno = XFS_AGB_TO_DADDR(mp, XFS_INO_TO_AGNO(mp, inum),
 					 XFS_INO_TO_AGBNO(mp, inum));
 
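
Note: the two deleted if/else branches collapse into the new helper and
mount fields. Judging from the call site above (a sketch inferred here,
not quoted from this patch), xfs_icluster_size_fsb() presumably reduces
to:

	static inline int
	xfs_icluster_size_fsb(
		struct xfs_mount	*mp)
	{
		/* The cluster never occupies less than one fs block. */
		if (mp->m_sb.sb_blocksize >= mp->m_inode_cluster_size)
			return 1;
		return mp->m_inode_cluster_size >> mp->m_sb.sb_blocklog;
	}

Either way, inodes_per_cluster = blks_per_cluster << sb_inopblog yields
the same value the old ninodes computation produced in both branches,
since sb_inopblock == 1 << sb_inopblog.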
@@ -2225,7 +2214,7 @@ xfs_ifree_cluster(
 		 * transaction stale above, which means there is no point in
 		 * even trying to lock them.
 		 */
-		for (i = 0; i < ninodes; i++) {
+		for (i = 0; i < inodes_per_cluster; i++) {
 retry:
 			rcu_read_lock();
 			ip = radix_tree_lookup(&pag->pag_ici_root,
@@ -2906,13 +2895,13 @@ xfs_iflush_cluster(
 
 	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
 
-	inodes_per_cluster = XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog;
+	inodes_per_cluster = mp->m_inode_cluster_size >> mp->m_sb.sb_inodelog;
 	ilist_size = inodes_per_cluster * sizeof(xfs_inode_t *);
 	ilist = kmem_alloc(ilist_size, KM_MAYFAIL|KM_NOFS);
 	if (!ilist)
 		goto out_put;
 
-	mask = ~(((XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog)) - 1);
+	mask = ~(((mp->m_inode_cluster_size >> mp->m_sb.sb_inodelog)) - 1);
 	first_index = XFS_INO_TO_AGINO(mp, ip->i_ino) & mask;
 	rcu_read_lock();
 	/* really need a gang lookup range call here */
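
Note: only the XFS_INODE_CLUSTER_SIZE() macro is replaced by the
mp->m_inode_cluster_size field here; the rounding logic is unchanged. A
self-contained worked example of that mask arithmetic, under assumed
geometry (8192-byte clusters and 256-byte inodes, numbers not taken
from this patch):

	#include <stdio.h>

	int main(void)
	{
		/* 8192-byte cluster >> log2(256-byte inode) = 32 inodes */
		unsigned int inodes_per_cluster = 8192 >> 8;
		unsigned int mask = ~(inodes_per_cluster - 1);

		/* AG-relative inode 100 rounds down to 96, the first
		 * inode of its cluster, so the gang lookup starts there. */
		printf("first_index = %u\n", 100u & mask);
		return 0;
	}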