path: root/fs/xfs/xfs_iget.c
author    Niv Sardi <xaiki@debian.org>  2008-11-06 23:07:12 -0500
committer Niv Sardi <xaiki@debian.org>  2008-11-06 23:07:12 -0500
commit    dcd7b4e5c0649b1d2219399529b20de1df517e55 (patch)
tree      ef00739e48ddda0a30061d62a7348ed4b0c9aeeb /fs/xfs/xfs_iget.c
parent    75fa67706cce5272bcfc51ed646f2da21f3bdb6e (diff)
parent    91b777125175077fb74025608dba87f100586c62 (diff)
Merge branch 'master' of git://oss.sgi.com:8090/xfs/linux-2.6
Diffstat (limited to 'fs/xfs/xfs_iget.c')
-rw-r--r--    fs/xfs/xfs_iget.c    501
1 file changed, 199 insertions(+), 302 deletions(-)
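
For orientation: the substance of this merge for xfs_iget.c is that xfs_iget_core() is split into a cache-hit path (xfs_iget_cache_hit) and a cache-miss path (xfs_iget_cache_miss), and both report transient conditions as EAGAIN so a single retry loop in xfs_iget() replaces the old scattered delay(1)/goto again sites. Below is a rough userspace sketch of that control flow; every name in it (fake_inode, cache_lookup, cache_hit, cache_miss, fake_iget) is an invented stand-in for illustration, not the XFS API.

/*
 * Sketch of the retry shape xfs_iget() takes after this merge: look up
 * the inode in a cache, dispatch to a hit or miss handler, and retry
 * from the top whenever either handler returns EAGAIN.
 */
#include <errno.h>
#include <stddef.h>
#include <unistd.h>

struct fake_inode { int mode; };

/* Pretend per-AG cache: an inode pointer on a hit, NULL on a miss. */
static struct fake_inode *cache_lookup(unsigned long ino)
{
        (void)ino;
        return NULL;                    /* always miss in this sketch */
}

static int cache_hit(struct fake_inode *ip)
{
        (void)ip;
        return 0;                       /* or EAGAIN to request a retry */
}

static int cache_miss(unsigned long ino, struct fake_inode **ipp)
{
        static struct fake_inode inode;
        (void)ino;
        *ipp = &inode;
        return 0;                       /* or EAGAIN to request a retry */
}

static int fake_iget(unsigned long ino, struct fake_inode **ipp)
{
        struct fake_inode *ip;
        int error;
again:
        ip = cache_lookup(ino);
        error = ip ? cache_hit(ip) : cache_miss(ino, &ip);
        if (error == EAGAIN) {
                usleep(1000);           /* stands in for delay(1) */
                goto again;
        }
        if (!error)
                *ipp = ip;
        return error;
}

int main(void)
{
        struct fake_inode *ip;
        return fake_iget(42, &ip);
}

Centralising the backoff this way means every transient state (INEW, IRECLAIM, a lost radix-tree insert race) funnels through one well-defined retry point instead of each site open-coding its own unlock/delay/goto sequence.
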
diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c
index e229e9e001c2..bf4dc5eb4cfc 100644
--- a/fs/xfs/xfs_iget.c
+++ b/fs/xfs/xfs_iget.c
@@ -38,164 +38,122 @@
 #include "xfs_ialloc.h"
 #include "xfs_quota.h"
 #include "xfs_utils.h"
+#include "xfs_trans_priv.h"
+#include "xfs_inode_item.h"

 /*
- * Look up an inode by number in the given file system.
- * The inode is looked up in the cache held in each AG.
- * If the inode is found in the cache, attach it to the provided
- * vnode.
- *
- * If it is not in core, read it in from the file system's device,
- * add it to the cache and attach the provided vnode.
- *
- * The inode is locked according to the value of the lock_flags parameter.
- * This flag parameter indicates how and if the inode's IO lock and inode lock
- * should be taken.
- *
- * mp -- the mount point structure for the current file system.  It points
- *       to the inode hash table.
- * tp -- a pointer to the current transaction if there is one.  This is
- *       simply passed through to the xfs_iread() call.
- * ino -- the number of the inode desired.  This is the unique identifier
- *        within the file system for the inode being requested.
- * lock_flags -- flags indicating how to lock the inode.  See the comment
- *               for xfs_ilock() for a list of valid values.
- * bno -- the block number starting the buffer containing the inode,
- *        if known (as by bulkstat), else 0.
+ * Check the validity of the inode we just found in the cache
  */
-STATIC int
-xfs_iget_core(
-        struct inode    *inode,
-        xfs_mount_t     *mp,
-        xfs_trans_t     *tp,
-        xfs_ino_t       ino,
-        uint            flags,
-        uint            lock_flags,
-        xfs_inode_t     **ipp,
-        xfs_daddr_t     bno)
+static int
+xfs_iget_cache_hit(
+        struct xfs_perag        *pag,
+        struct xfs_inode        *ip,
+        int                     flags,
+        int                     lock_flags) __releases(pag->pag_ici_lock)
 {
-        struct inode    *old_inode;
-        xfs_inode_t     *ip;
-        xfs_inode_t     *iq;
-        int             error;
-        unsigned long   first_index, mask;
-        xfs_perag_t     *pag;
-        xfs_agino_t     agino;
+        struct xfs_mount *mp = ip->i_mount;
+        int             error = EAGAIN;

-        /* the radix tree exists only in inode capable AGs */
-        if (XFS_INO_TO_AGNO(mp, ino) >= mp->m_maxagi)
-                return EINVAL;
-
-        /* get the perag structure and ensure that it's inode capable */
-        pag = xfs_get_perag(mp, ino);
-        if (!pag->pagi_inodeok)
-                return EINVAL;
-        ASSERT(pag->pag_ici_init);
-        agino = XFS_INO_TO_AGINO(mp, ino);
+        /*
+         * If INEW is set this inode is being set up
+         * If IRECLAIM is set this inode is being torn down
+         * Pause and try again.
+         */
+        if (xfs_iflags_test(ip, (XFS_INEW|XFS_IRECLAIM))) {
+                XFS_STATS_INC(xs_ig_frecycle);
+                goto out_error;
+        }

-again:
-        read_lock(&pag->pag_ici_lock);
-        ip = radix_tree_lookup(&pag->pag_ici_root, agino);
+        /* If IRECLAIMABLE is set, we've torn down the vfs inode part */
+        if (xfs_iflags_test(ip, XFS_IRECLAIMABLE)) {

-        if (ip != NULL) {
                 /*
-                 * If INEW is set this inode is being set up
-                 * we need to pause and try again.
+                 * If lookup is racing with unlink, then we should return an
+                 * error immediately so we don't remove it from the reclaim
+                 * list and potentially leak the inode.
                  */
-                if (xfs_iflags_test(ip, XFS_INEW)) {
-                        read_unlock(&pag->pag_ici_lock);
-                        delay(1);
-                        XFS_STATS_INC(xs_ig_frecycle);
-
-                        goto again;
+                if ((ip->i_d.di_mode == 0) && !(flags & XFS_IGET_CREATE)) {
+                        error = ENOENT;
+                        goto out_error;
                 }

-                old_inode = ip->i_vnode;
-                if (old_inode == NULL) {
-                        /*
-                         * If IRECLAIM is set this inode is
-                         * on its way out of the system,
-                         * we need to pause and try again.
-                         */
-                        if (xfs_iflags_test(ip, XFS_IRECLAIM)) {
-                                read_unlock(&pag->pag_ici_lock);
-                                delay(1);
-                                XFS_STATS_INC(xs_ig_frecycle);
-
-                                goto again;
-                        }
-                        ASSERT(xfs_iflags_test(ip, XFS_IRECLAIMABLE));
-
-                        /*
-                         * If lookup is racing with unlink, then we
-                         * should return an error immediately so we
-                         * don't remove it from the reclaim list and
-                         * potentially leak the inode.
-                         */
-                        if ((ip->i_d.di_mode == 0) &&
-                            !(flags & XFS_IGET_CREATE)) {
-                                read_unlock(&pag->pag_ici_lock);
-                                xfs_put_perag(mp, pag);
-                                return ENOENT;
-                        }
-
-                        xfs_itrace_exit_tag(ip, "xfs_iget.alloc");
-
-                        XFS_STATS_INC(xs_ig_found);
-                        xfs_iflags_clear(ip, XFS_IRECLAIMABLE);
-                        read_unlock(&pag->pag_ici_lock);
-
-                        XFS_MOUNT_ILOCK(mp);
-                        list_del_init(&ip->i_reclaim);
-                        XFS_MOUNT_IUNLOCK(mp);
-
-                        goto finish_inode;
-
-                } else if (inode != old_inode) {
-                        /* The inode is being torn down, pause and
-                         * try again.
-                         */
-                        if (old_inode->i_state & (I_FREEING | I_CLEAR)) {
-                                read_unlock(&pag->pag_ici_lock);
-                                delay(1);
-                                XFS_STATS_INC(xs_ig_frecycle);
-
-                                goto again;
-                        }
-/* Chances are the other vnode (the one in the inode) is being torn
-* down right now, and we landed on top of it. Question is, what do
-* we do? Unhook the old inode and hook up the new one?
-*/
-                        cmn_err(CE_PANIC,
-                "xfs_iget_core: ambiguous vns: vp/0x%p, invp/0x%p",
-                                        old_inode, inode);
-                }
+                xfs_itrace_exit_tag(ip, "xfs_iget.alloc");

                 /*
-                 * Inode cache hit
+                 * We need to re-initialise the VFS inode as it has been
+                 * 'freed' by the VFS. Do this here so we can deal with
+                 * errors cleanly, then tag it so it can be set up correctly
+                 * later.
                  */
-                read_unlock(&pag->pag_ici_lock);
-                XFS_STATS_INC(xs_ig_found);
-
-finish_inode:
-        if (ip->i_d.di_mode == 0 && !(flags & XFS_IGET_CREATE)) {
-                xfs_put_perag(mp, pag);
-                return ENOENT;
+                if (!inode_init_always(mp->m_super, VFS_I(ip))) {
+                        error = ENOMEM;
+                        goto out_error;
                 }

-        if (lock_flags != 0)
-                xfs_ilock(ip, lock_flags);
+                /*
+                 * We must set the XFS_INEW flag before clearing the
+                 * XFS_IRECLAIMABLE flag so that if a racing lookup does
+                 * not find the XFS_IRECLAIMABLE above but has the igrab()
+                 * below succeed we can safely check XFS_INEW to detect
+                 * that this inode is still being initialised.
+                 */
+                xfs_iflags_set(ip, XFS_INEW);
+                xfs_iflags_clear(ip, XFS_IRECLAIMABLE);
+
+                /* clear the radix tree reclaim flag as well. */
+                __xfs_inode_clear_reclaim_tag(mp, pag, ip);
+        } else if (!igrab(VFS_I(ip))) {
+                /* If the VFS inode is being torn down, pause and try again. */
+                XFS_STATS_INC(xs_ig_frecycle);
+                goto out_error;
+        } else if (xfs_iflags_test(ip, XFS_INEW)) {
+                /*
+                 * We are racing with another cache hit that is
+                 * currently recycling this inode out of the XFS_IRECLAIMABLE
+                 * state. Wait for the initialisation to complete before
+                 * continuing.
+                 */
+                wait_on_inode(VFS_I(ip));
+        }

-        xfs_iflags_clear(ip, XFS_ISTALE);
-        xfs_itrace_exit_tag(ip, "xfs_iget.found");
-        goto return_ip;
+        if (ip->i_d.di_mode == 0 && !(flags & XFS_IGET_CREATE)) {
+                error = ENOENT;
+                iput(VFS_I(ip));
+                goto out_error;
         }

-        /*
-         * Inode cache miss
-         */
+        /* We've got a live one. */
+        read_unlock(&pag->pag_ici_lock);
+
+        if (lock_flags != 0)
+                xfs_ilock(ip, lock_flags);
+
+        xfs_iflags_clear(ip, XFS_ISTALE);
+        xfs_itrace_exit_tag(ip, "xfs_iget.found");
+        XFS_STATS_INC(xs_ig_found);
+        return 0;
+
+out_error:
         read_unlock(&pag->pag_ici_lock);
-        XFS_STATS_INC(xs_ig_missed);
+        return error;
+}
+
+
+static int
+xfs_iget_cache_miss(
+        struct xfs_mount        *mp,
+        struct xfs_perag        *pag,
+        xfs_trans_t             *tp,
+        xfs_ino_t               ino,
+        struct xfs_inode        **ipp,
+        xfs_daddr_t             bno,
+        int                     flags,
+        int                     lock_flags) __releases(pag->pag_ici_lock)
+{
+        struct xfs_inode        *ip;
+        int                     error;
+        unsigned long           first_index, mask;
+        xfs_agino_t             agino = XFS_INO_TO_AGINO(mp, ino);

         /*
          * Read the disk inode attributes into a new inode structure and get
@@ -203,116 +161,85 @@ finish_inode:
          */
         error = xfs_iread(mp, tp, ino, &ip, bno,
                         (flags & XFS_IGET_BULKSTAT) ? XFS_IMAP_BULKSTAT : 0);
-        if (error) {
-                xfs_put_perag(mp, pag);
+        if (error)
                 return error;
-        }

         xfs_itrace_exit_tag(ip, "xfs_iget.alloc");

-
-        mrlock_init(&ip->i_lock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
-                     "xfsino", ip->i_ino);
-        mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);
-        init_waitqueue_head(&ip->i_ipin_wait);
-        atomic_set(&ip->i_pincount, 0);
-
-        /*
-         * Because we want to use a counting completion, complete
-         * the flush completion once to allow a single access to
-         * the flush completion without blocking.
-         */
-        init_completion(&ip->i_flush);
-        complete(&ip->i_flush);
+        if ((ip->i_d.di_mode == 0) && !(flags & XFS_IGET_CREATE)) {
+                error = ENOENT;
+                goto out_destroy;
+        }

         if (lock_flags)
                 xfs_ilock(ip, lock_flags);

-        if ((ip->i_d.di_mode == 0) && !(flags & XFS_IGET_CREATE)) {
-                xfs_idestroy(ip);
-                xfs_put_perag(mp, pag);
-                return ENOENT;
-        }
-
         /*
          * Preload the radix tree so we can insert safely under the
-         * write spinlock.
+         * write spinlock. Note that we cannot sleep inside the preload
+         * region.
          */
         if (radix_tree_preload(GFP_KERNEL)) {
-                xfs_idestroy(ip);
-                delay(1);
-                goto again;
+                error = EAGAIN;
+                goto out_unlock;
         }
+
         mask = ~(((XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog)) - 1);
         first_index = agino & mask;
         write_lock(&pag->pag_ici_lock);
-        /*
-         * insert the new inode
-         */
+
+        /* insert the new inode */
         error = radix_tree_insert(&pag->pag_ici_root, agino, ip);
         if (unlikely(error)) {
-                BUG_ON(error != -EEXIST);
-                write_unlock(&pag->pag_ici_lock);
-                radix_tree_preload_end();
-                xfs_idestroy(ip);
+                WARN_ON(error != -EEXIST);
                 XFS_STATS_INC(xs_ig_dup);
-                goto again;
+                error = EAGAIN;
+                goto out_preload_end;
         }

-        /*
-         * These values _must_ be set before releasing the radix tree lock!
-         */
+        /* These values _must_ be set before releasing the radix tree lock! */
         ip->i_udquot = ip->i_gdquot = NULL;
         xfs_iflags_set(ip, XFS_INEW);

         write_unlock(&pag->pag_ici_lock);
         radix_tree_preload_end();
-
-        /*
-         * Link ip to its mount and thread it on the mount's inode list.
-         */
-        XFS_MOUNT_ILOCK(mp);
-        if ((iq = mp->m_inodes)) {
-                ASSERT(iq->i_mprev->i_mnext == iq);
-                ip->i_mprev = iq->i_mprev;
-                iq->i_mprev->i_mnext = ip;
-                iq->i_mprev = ip;
-                ip->i_mnext = iq;
-        } else {
-                ip->i_mnext = ip;
-                ip->i_mprev = ip;
-        }
-        mp->m_inodes = ip;
-
-        XFS_MOUNT_IUNLOCK(mp);
-        xfs_put_perag(mp, pag);
-
- return_ip:
-        ASSERT(ip->i_df.if_ext_max ==
-               XFS_IFORK_DSIZE(ip) / sizeof(xfs_bmbt_rec_t));
-
-        xfs_iflags_set(ip, XFS_IMODIFIED);
         *ipp = ip;
-
-        /*
-         * Set up the Linux with the Linux inode.
-         */
-        ip->i_vnode = inode;
-        inode->i_private = ip;
-
-        /*
-         * If we have a real type for an on-disk inode, we can set ops(&unlock)
-         * now.  If it's a new inode being created, xfs_ialloc will handle it.
-         */
-        if (ip->i_d.di_mode != 0)
-                xfs_setup_inode(ip);
         return 0;
-}

+out_preload_end:
+        write_unlock(&pag->pag_ici_lock);
+        radix_tree_preload_end();
+out_unlock:
+        if (lock_flags)
+                xfs_iunlock(ip, lock_flags);
+out_destroy:
+        xfs_destroy_inode(ip);
+        return error;
+}

 /*
- * The 'normal' internal xfs_iget, if needed it will
- * 'allocate', or 'get', the vnode.
+ * Look up an inode by number in the given file system.
+ * The inode is looked up in the cache held in each AG.
+ * If the inode is found in the cache, initialise the vfs inode
+ * if necessary.
+ *
+ * If it is not in core, read it in from the file system's device,
+ * add it to the cache and initialise the vfs inode.
+ *
+ * The inode is locked according to the value of the lock_flags parameter.
+ * This flag parameter indicates how and if the inode's IO lock and inode lock
+ * should be taken.
+ *
+ * mp -- the mount point structure for the current file system.  It points
+ *       to the inode hash table.
+ * tp -- a pointer to the current transaction if there is one.  This is
+ *       simply passed through to the xfs_iread() call.
+ * ino -- the number of the inode desired.  This is the unique identifier
+ *        within the file system for the inode being requested.
+ * lock_flags -- flags indicating how to lock the inode.  See the comment
+ *               for xfs_ilock() for a list of valid values.
+ * bno -- the block number starting the buffer containing the inode,
+ *        if known (as by bulkstat), else 0.
  */
 int
 xfs_iget(
@@ -324,61 +251,65 @@ xfs_iget(
         xfs_inode_t     **ipp,
         xfs_daddr_t     bno)
 {
-        struct inode    *inode;
         xfs_inode_t     *ip;
         int             error;
+        xfs_perag_t     *pag;
+        xfs_agino_t     agino;

-        XFS_STATS_INC(xs_ig_attempts);
-
-retry:
-        inode = iget_locked(mp->m_super, ino);
-        if (!inode)
-                /* If we got no inode we are out of memory */
-                return ENOMEM;
-
-        if (inode->i_state & I_NEW) {
-                XFS_STATS_INC(vn_active);
-                XFS_STATS_INC(vn_alloc);
-
-                error = xfs_iget_core(inode, mp, tp, ino, flags,
-                                lock_flags, ipp, bno);
-                if (error) {
-                        make_bad_inode(inode);
-                        if (inode->i_state & I_NEW)
-                                unlock_new_inode(inode);
-                        iput(inode);
-                }
-                return error;
+        /* the radix tree exists only in inode capable AGs */
+        if (XFS_INO_TO_AGNO(mp, ino) >= mp->m_maxagi)
+                return EINVAL;
+
+        /* get the perag structure and ensure that it's inode capable */
+        pag = xfs_get_perag(mp, ino);
+        if (!pag->pagi_inodeok)
+                return EINVAL;
+        ASSERT(pag->pag_ici_init);
+        agino = XFS_INO_TO_AGINO(mp, ino);
+
+again:
+        error = 0;
+        read_lock(&pag->pag_ici_lock);
+        ip = radix_tree_lookup(&pag->pag_ici_root, agino);
+
+        if (ip) {
+                error = xfs_iget_cache_hit(pag, ip, flags, lock_flags);
+                if (error)
+                        goto out_error_or_again;
+        } else {
+                read_unlock(&pag->pag_ici_lock);
+                XFS_STATS_INC(xs_ig_missed);
+
+                error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip, bno,
+                                                        flags, lock_flags);
+                if (error)
+                        goto out_error_or_again;
         }
+        xfs_put_perag(mp, pag);
+
+        xfs_iflags_set(ip, XFS_IMODIFIED);
+        *ipp = ip;

+        ASSERT(ip->i_df.if_ext_max ==
+               XFS_IFORK_DSIZE(ip) / sizeof(xfs_bmbt_rec_t));
         /*
-         * If the inode is not fully constructed due to
-         * filehandle mismatches wait for the inode to go
-         * away and try again.
-         *
-         * iget_locked will call __wait_on_freeing_inode
-         * to wait for the inode to go away.
+         * If we have a real type for an on-disk inode, we can set ops(&unlock)
+         * now.  If it's a new inode being created, xfs_ialloc will handle it.
          */
-        if (is_bad_inode(inode)) {
-                iput(inode);
-                delay(1);
-                goto retry;
-        }
-
-        ip = XFS_I(inode);
-        if (!ip) {
-                iput(inode);
+        if (xfs_iflags_test(ip, XFS_INEW) && ip->i_d.di_mode != 0)
+                xfs_setup_inode(ip);
+        return 0;

+out_error_or_again:
+        if (error == EAGAIN) {
                 delay(1);
-                goto retry;
+                goto again;
         }
-
-        if (lock_flags != 0)
-                xfs_ilock(ip, lock_flags);
-        XFS_STATS_INC(xs_ig_found);
-        *ipp = ip;
-        return 0;
+        xfs_put_perag(mp, pag);
+        return error;
 }

+
 /*
  * Look for the inode corresponding to the given ino in the hash table.
  * If it is there and its i_transp pointer matches tp, return it.
@@ -462,14 +393,13 @@ xfs_ireclaim(xfs_inode_t *ip)
         xfs_iextract(ip);

         /*
-         * Here we do a spurious inode lock in order to coordinate with
-         * xfs_sync().  This is because xfs_sync() references the inodes
-         * in the mount list without taking references on the corresponding
-         * vnodes. We make that OK here by ensuring that we wait until
-         * the inode is unlocked in xfs_sync() before we go ahead and
-         * free it. We get both the regular lock and the io lock because
-         * the xfs_sync() code may need to drop the regular one but will
-         * still hold the io lock.
+         * Here we do a spurious inode lock in order to coordinate with inode
+         * cache radix tree lookups. This is because the lookup can reference
+         * the inodes in the cache without taking references. We make that OK
+         * here by ensuring that we wait until the inode is unlocked after the
+         * lookup before we go ahead and free it. We get both the ilock and
+         * the iolock because the code may need to drop the ilock one but will
+         * still hold the iolock.
          */
         xfs_ilock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);

@@ -480,14 +410,6 @@ xfs_ireclaim(xfs_inode_t *ip)
         XFS_QM_DQDETACH(ip->i_mount, ip);

         /*
-         * Pull our behavior descriptor from the vnode chain.
-         */
-        if (ip->i_vnode) {
-                ip->i_vnode->i_private = NULL;
-                ip->i_vnode = NULL;
-        }
-
-        /*
          * Free all memory associated with the inode.
          */
         xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
@@ -505,38 +427,13 @@ xfs_iextract(
 {
         xfs_mount_t     *mp = ip->i_mount;
         xfs_perag_t     *pag = xfs_get_perag(mp, ip->i_ino);
-        xfs_inode_t     *iq;

         write_lock(&pag->pag_ici_lock);
         radix_tree_delete(&pag->pag_ici_root, XFS_INO_TO_AGINO(mp, ip->i_ino));
         write_unlock(&pag->pag_ici_lock);
         xfs_put_perag(mp, pag);

-        /*
-         * Remove from mount's inode list.
-         */
-        XFS_MOUNT_ILOCK(mp);
-        ASSERT((ip->i_mnext != NULL) && (ip->i_mprev != NULL));
-        iq = ip->i_mnext;
-        iq->i_mprev = ip->i_mprev;
-        ip->i_mprev->i_mnext = iq;
-
-        /*
-         * Fix up the head pointer if it points to the inode being deleted.
-         */
-        if (mp->m_inodes == ip) {
-                if (ip == iq) {
-                        mp->m_inodes = NULL;
-                } else {
-                        mp->m_inodes = iq;
-                }
-        }
-
-        /* Deal with the deleted inodes list */
-        list_del_init(&ip->i_reclaim);
-
         mp->m_ireclaims++;
-        XFS_MOUNT_IUNLOCK(mp);
 }

 /*
@@ -737,7 +634,7 @@ xfs_iunlock(
                  * it is in the AIL and anyone is waiting on it.  Don't do
                  * this if the caller has asked us not to.
                  */
-                xfs_trans_unlocked_item(ip->i_mount,
+                xfs_trans_unlocked_item(ip->i_itemp->ili_item.li_ailp,
                                         (xfs_log_item_t*)(ip->i_itemp));
         }
         xfs_ilock_trace(ip, 3, lock_flags, (inst_t *)__return_address);
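
One subtlety worth pulling out of the xfs_iget_cache_hit() hunk above: XFS_INEW is set before XFS_IRECLAIMABLE is cleared, so a racing lookup that misses the IRECLAIMABLE test but wins igrab() can still detect the half-initialised inode via INEW and wait on it. The toy C11 program below demonstrates that invariant under stated assumptions; the flag names and functions are invented for illustration and nothing here is XFS code.

/*
 * At every instant during recycling at least one of the two flags is
 * visible, so a concurrent lookup can never mistake the inode for one
 * that is fully set up.
 */
#include <stdatomic.h>
#include <stdio.h>

enum { F_INEW = 1 << 0, F_IRECLAIMABLE = 1 << 1 };

static atomic_uint flags = F_IRECLAIMABLE;

static void recycler(void)
{
        /* Correct order: advertise "being initialised" first... */
        atomic_fetch_or(&flags, F_INEW);
        /* ...then drop "torn down". Reversing these two lines opens a
         * window in which a concurrent lookup observes neither flag. */
        atomic_fetch_and(&flags, ~F_IRECLAIMABLE);
}

static void racing_lookup(void)
{
        unsigned int f = atomic_load(&flags);
        if (f & F_IRECLAIMABLE)
                puts("recycle path");   /* take over the torn-down inode */
        else if (f & F_INEW)
                puts("wait");           /* wait_on_inode() in the real code */
        else
                puts("fully initialised, safe to use");
}

int main(void)
{
        recycler();
        racing_lookup();
        return 0;
}
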