Diffstat (limited to 'fs/xfs/linux-2.6/xfs_sync.c')
-rw-r--r--	fs/xfs/linux-2.6/xfs_sync.c	479
1 file changed, 253 insertions, 226 deletions
diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c
index f7ba76633c29..b619d6b8ca43 100644
--- a/fs/xfs/linux-2.6/xfs_sync.c
+++ b/fs/xfs/linux-2.6/xfs_sync.c
@@ -43,166 +43,267 @@
43 | #include "xfs_buf_item.h" | 43 | #include "xfs_buf_item.h" |
44 | #include "xfs_inode_item.h" | 44 | #include "xfs_inode_item.h" |
45 | #include "xfs_rw.h" | 45 | #include "xfs_rw.h" |
46 | #include "xfs_quota.h" | ||
46 | 47 | ||
47 | #include <linux/kthread.h> | 48 | #include <linux/kthread.h> |
48 | #include <linux/freezer.h> | 49 | #include <linux/freezer.h> |
49 | 50 | ||
50 | /* | ||
51 | * Sync all the inodes in the given AG according to the | ||
52 | * direction given by the flags. | ||
53 | */ | ||
54 | STATIC int | ||
55 | xfs_sync_inodes_ag( | ||
56 | xfs_mount_t *mp, | ||
57 | int ag, | ||
58 | int flags) | ||
59 | { | ||
60 | xfs_perag_t *pag = &mp->m_perag[ag]; | ||
61 | int nr_found; | ||
62 | uint32_t first_index = 0; | ||
63 | int error = 0; | ||
64 | int last_error = 0; | ||
65 | 51 | ||
66 | do { | 52 | STATIC xfs_inode_t * |
67 | struct inode *inode; | 53 | xfs_inode_ag_lookup( |
68 | xfs_inode_t *ip = NULL; | 54 | struct xfs_mount *mp, |
69 | int lock_flags = XFS_ILOCK_SHARED; | 55 | struct xfs_perag *pag, |
56 | uint32_t *first_index, | ||
57 | int tag) | ||
58 | { | ||
59 | int nr_found; | ||
60 | struct xfs_inode *ip; | ||
70 | 61 | ||
71 | /* | 62 | /* |
72 | * use a gang lookup to find the next inode in the tree | 63 | * use a gang lookup to find the next inode in the tree |
73 | * as the tree is sparse and a gang lookup walks to find | 64 | * as the tree is sparse and a gang lookup walks to find |
74 | * the number of objects requested. | 65 | * the number of objects requested. |
75 | */ | 66 | */ |
76 | read_lock(&pag->pag_ici_lock); | 67 | read_lock(&pag->pag_ici_lock); |
68 | if (tag == XFS_ICI_NO_TAG) { | ||
77 | nr_found = radix_tree_gang_lookup(&pag->pag_ici_root, | 69 | nr_found = radix_tree_gang_lookup(&pag->pag_ici_root, |
78 | (void**)&ip, first_index, 1); | 70 | (void **)&ip, *first_index, 1); |
71 | } else { | ||
72 | nr_found = radix_tree_gang_lookup_tag(&pag->pag_ici_root, | ||
73 | (void **)&ip, *first_index, 1, tag); | ||
74 | } | ||
75 | if (!nr_found) | ||
76 | goto unlock; | ||
79 | 77 | ||
80 | if (!nr_found) { | 78 | /* |
81 | read_unlock(&pag->pag_ici_lock); | 79 | * Update the index for the next lookup. Catch overflows |
82 | break; | 80 | * into the next AG range which can occur if we have inodes |
83 | } | 81 | * in the last block of the AG and we are currently |
82 | * pointing to the last inode. | ||
83 | */ | ||
84 | *first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1); | ||
85 | if (*first_index < XFS_INO_TO_AGINO(mp, ip->i_ino)) | ||
86 | goto unlock; | ||
84 | 87 | ||
85 | /* | 88 | return ip; |
86 | * Update the index for the next lookup. Catch overflows | ||
87 | * into the next AG range which can occur if we have inodes | ||
88 | * in the last block of the AG and we are currently | ||
89 | * pointing to the last inode. | ||
90 | */ | ||
91 | first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1); | ||
92 | if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino)) { | ||
93 | read_unlock(&pag->pag_ici_lock); | ||
94 | break; | ||
95 | } | ||
96 | 89 | ||
97 | /* nothing to sync during shutdown */ | 90 | unlock: |
98 | if (XFS_FORCED_SHUTDOWN(mp)) { | 91 | read_unlock(&pag->pag_ici_lock); |
99 | read_unlock(&pag->pag_ici_lock); | 92 | return NULL; |
100 | return 0; | 93 | } |
101 | } | ||
102 | 94 | ||
103 | /* | 95 | STATIC int |
104 | * If we can't get a reference on the inode, it must be | 96 | xfs_inode_ag_walk( |
105 | * in reclaim. Leave it for the reclaim code to flush. | 97 | struct xfs_mount *mp, |
106 | */ | 98 | xfs_agnumber_t ag, |
107 | inode = VFS_I(ip); | 99 | int (*execute)(struct xfs_inode *ip, |
108 | if (!igrab(inode)) { | 100 | struct xfs_perag *pag, int flags), |
109 | read_unlock(&pag->pag_ici_lock); | 101 | int flags, |
110 | continue; | 102 | int tag) |
111 | } | 103 | { |
112 | read_unlock(&pag->pag_ici_lock); | 104 | struct xfs_perag *pag = &mp->m_perag[ag]; |
105 | uint32_t first_index; | ||
106 | int last_error = 0; | ||
107 | int skipped; | ||
113 | 108 | ||
114 | /* avoid new or bad inodes */ | 109 | restart: |
115 | if (is_bad_inode(inode) || | 110 | skipped = 0; |
116 | xfs_iflags_test(ip, XFS_INEW)) { | 111 | first_index = 0; |
117 | IRELE(ip); | 112 | do { |
118 | continue; | 113 | int error = 0; |
119 | } | 114 | xfs_inode_t *ip; |
120 | 115 | ||
121 | /* | 116 | ip = xfs_inode_ag_lookup(mp, pag, &first_index, tag); |
122 | * If we have to flush data or wait for I/O completion | 117 | if (!ip) |
123 | * we need to hold the iolock. | 118 | break; |
124 | */ | ||
125 | if (flags & SYNC_DELWRI) { | ||
126 | if (VN_DIRTY(inode)) { | ||
127 | if (flags & SYNC_TRYLOCK) { | ||
128 | if (xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED)) | ||
129 | lock_flags |= XFS_IOLOCK_SHARED; | ||
130 | } else { | ||
131 | xfs_ilock(ip, XFS_IOLOCK_SHARED); | ||
132 | lock_flags |= XFS_IOLOCK_SHARED; | ||
133 | } | ||
134 | if (lock_flags & XFS_IOLOCK_SHARED) { | ||
135 | error = xfs_flush_pages(ip, 0, -1, | ||
136 | (flags & SYNC_WAIT) ? 0 | ||
137 | : XFS_B_ASYNC, | ||
138 | FI_NONE); | ||
139 | } | ||
140 | } | ||
141 | if (VN_CACHED(inode) && (flags & SYNC_IOWAIT)) | ||
142 | xfs_ioend_wait(ip); | ||
143 | } | ||
144 | xfs_ilock(ip, XFS_ILOCK_SHARED); | ||
145 | |||
146 | if ((flags & SYNC_ATTR) && !xfs_inode_clean(ip)) { | ||
147 | if (flags & SYNC_WAIT) { | ||
148 | xfs_iflock(ip); | ||
149 | if (!xfs_inode_clean(ip)) | ||
150 | error = xfs_iflush(ip, XFS_IFLUSH_SYNC); | ||
151 | else | ||
152 | xfs_ifunlock(ip); | ||
153 | } else if (xfs_iflock_nowait(ip)) { | ||
154 | if (!xfs_inode_clean(ip)) | ||
155 | error = xfs_iflush(ip, XFS_IFLUSH_DELWRI); | ||
156 | else | ||
157 | xfs_ifunlock(ip); | ||
158 | } | ||
159 | } | ||
160 | xfs_iput(ip, lock_flags); | ||
161 | 119 | ||
120 | error = execute(ip, pag, flags); | ||
121 | if (error == EAGAIN) { | ||
122 | skipped++; | ||
123 | continue; | ||
124 | } | ||
162 | if (error) | 125 | if (error) |
163 | last_error = error; | 126 | last_error = error; |
164 | /* | 127 | /* |
165 | * bail out if the filesystem is corrupted. | 128 | * bail out if the filesystem is corrupted. |
166 | */ | 129 | */ |
167 | if (error == EFSCORRUPTED) | 130 | if (error == EFSCORRUPTED) |
168 | return XFS_ERROR(error); | 131 | break; |
169 | 132 | ||
170 | } while (nr_found); | 133 | } while (1); |
134 | |||
135 | if (skipped) { | ||
136 | delay(1); | ||
137 | goto restart; | ||
138 | } | ||
171 | 139 | ||
140 | xfs_put_perag(mp, pag); | ||
172 | return last_error; | 141 | return last_error; |
173 | } | 142 | } |
174 | 143 | ||
175 | int | 144 | int |
176 | xfs_sync_inodes( | 145 | xfs_inode_ag_iterator( |
177 | xfs_mount_t *mp, | 146 | struct xfs_mount *mp, |
178 | int flags) | 147 | int (*execute)(struct xfs_inode *ip, |
148 | struct xfs_perag *pag, int flags), | ||
149 | int flags, | ||
150 | int tag) | ||
179 | { | 151 | { |
180 | int error; | 152 | int error = 0; |
181 | int last_error; | 153 | int last_error = 0; |
182 | int i; | 154 | xfs_agnumber_t ag; |
183 | int lflags = XFS_LOG_FORCE; | ||
184 | 155 | ||
185 | if (mp->m_flags & XFS_MOUNT_RDONLY) | 156 | for (ag = 0; ag < mp->m_sb.sb_agcount; ag++) { |
186 | return 0; | 157 | if (!mp->m_perag[ag].pag_ici_init) |
187 | error = 0; | 158 | continue; |
188 | last_error = 0; | 159 | error = xfs_inode_ag_walk(mp, ag, execute, flags, tag); |
160 | if (error) { | ||
161 | last_error = error; | ||
162 | if (error == EFSCORRUPTED) | ||
163 | break; | ||
164 | } | ||
165 | } | ||
166 | return XFS_ERROR(last_error); | ||
167 | } | ||
168 | |||
169 | /* must be called with pag_ici_lock held and releases it */ | ||
170 | int | ||
171 | xfs_sync_inode_valid( | ||
172 | struct xfs_inode *ip, | ||
173 | struct xfs_perag *pag) | ||
174 | { | ||
175 | struct inode *inode = VFS_I(ip); | ||
176 | |||
177 | /* nothing to sync during shutdown */ | ||
178 | if (XFS_FORCED_SHUTDOWN(ip->i_mount)) { | ||
179 | read_unlock(&pag->pag_ici_lock); | ||
180 | return EFSCORRUPTED; | ||
181 | } | ||
182 | |||
183 | /* | ||
184 | * If we can't get a reference on the inode, it must be in reclaim. | ||
185 | * Leave it for the reclaim code to flush. Also avoid inodes that | ||
186 | * haven't been fully initialised. | ||
187 | */ | ||
188 | if (!igrab(inode)) { | ||
189 | read_unlock(&pag->pag_ici_lock); | ||
190 | return ENOENT; | ||
191 | } | ||
192 | read_unlock(&pag->pag_ici_lock); | ||
193 | |||
194 | if (is_bad_inode(inode) || xfs_iflags_test(ip, XFS_INEW)) { | ||
195 | IRELE(ip); | ||
196 | return ENOENT; | ||
197 | } | ||
198 | |||
199 | return 0; | ||
200 | } | ||
201 | |||
202 | STATIC int | ||
203 | xfs_sync_inode_data( | ||
204 | struct xfs_inode *ip, | ||
205 | struct xfs_perag *pag, | ||
206 | int flags) | ||
207 | { | ||
208 | struct inode *inode = VFS_I(ip); | ||
209 | struct address_space *mapping = inode->i_mapping; | ||
210 | int error = 0; | ||
211 | |||
212 | error = xfs_sync_inode_valid(ip, pag); | ||
213 | if (error) | ||
214 | return error; | ||
215 | |||
216 | if (!mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) | ||
217 | goto out_wait; | ||
218 | |||
219 | if (!xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED)) { | ||
220 | if (flags & SYNC_TRYLOCK) | ||
221 | goto out_wait; | ||
222 | xfs_ilock(ip, XFS_IOLOCK_SHARED); | ||
223 | } | ||
224 | |||
225 | error = xfs_flush_pages(ip, 0, -1, (flags & SYNC_WAIT) ? | ||
226 | 0 : XFS_B_ASYNC, FI_NONE); | ||
227 | xfs_iunlock(ip, XFS_IOLOCK_SHARED); | ||
189 | 228 | ||
229 | out_wait: | ||
190 | if (flags & SYNC_WAIT) | 230 | if (flags & SYNC_WAIT) |
191 | lflags |= XFS_LOG_SYNC; | 231 | xfs_ioend_wait(ip); |
232 | IRELE(ip); | ||
233 | return error; | ||
234 | } | ||
192 | 235 | ||
193 | for (i = 0; i < mp->m_sb.sb_agcount; i++) { | 236 | STATIC int |
194 | if (!mp->m_perag[i].pag_ici_init) | 237 | xfs_sync_inode_attr( |
195 | continue; | 238 | struct xfs_inode *ip, |
196 | error = xfs_sync_inodes_ag(mp, i, flags); | 239 | struct xfs_perag *pag, |
197 | if (error) | 240 | int flags) |
198 | last_error = error; | 241 | { |
199 | if (error == EFSCORRUPTED) | 242 | int error = 0; |
200 | break; | 243 | |
244 | error = xfs_sync_inode_valid(ip, pag); | ||
245 | if (error) | ||
246 | return error; | ||
247 | |||
248 | xfs_ilock(ip, XFS_ILOCK_SHARED); | ||
249 | if (xfs_inode_clean(ip)) | ||
250 | goto out_unlock; | ||
251 | if (!xfs_iflock_nowait(ip)) { | ||
252 | if (!(flags & SYNC_WAIT)) | ||
253 | goto out_unlock; | ||
254 | xfs_iflock(ip); | ||
201 | } | 255 | } |
202 | if (flags & SYNC_DELWRI) | ||
203 | xfs_log_force(mp, 0, lflags); | ||
204 | 256 | ||
205 | return XFS_ERROR(last_error); | 257 | if (xfs_inode_clean(ip)) { |
258 | xfs_ifunlock(ip); | ||
259 | goto out_unlock; | ||
260 | } | ||
261 | |||
262 | error = xfs_iflush(ip, (flags & SYNC_WAIT) ? | ||
263 | XFS_IFLUSH_SYNC : XFS_IFLUSH_DELWRI); | ||
264 | |||
265 | out_unlock: | ||
266 | xfs_iunlock(ip, XFS_ILOCK_SHARED); | ||
267 | IRELE(ip); | ||
268 | return error; | ||
269 | } | ||
270 | |||
271 | /* | ||
272 | * Write out pagecache data for the whole filesystem. | ||
273 | */ | ||
274 | int | ||
275 | xfs_sync_data( | ||
276 | struct xfs_mount *mp, | ||
277 | int flags) | ||
278 | { | ||
279 | int error; | ||
280 | |||
281 | ASSERT((flags & ~(SYNC_TRYLOCK|SYNC_WAIT)) == 0); | ||
282 | |||
283 | error = xfs_inode_ag_iterator(mp, xfs_sync_inode_data, flags, | ||
284 | XFS_ICI_NO_TAG); | ||
285 | if (error) | ||
286 | return XFS_ERROR(error); | ||
287 | |||
288 | xfs_log_force(mp, 0, | ||
289 | (flags & SYNC_WAIT) ? | ||
290 | XFS_LOG_FORCE | XFS_LOG_SYNC : | ||
291 | XFS_LOG_FORCE); | ||
292 | return 0; | ||
293 | } | ||
294 | |||
295 | /* | ||
296 | * Write out inode metadata (attributes) for the whole filesystem. | ||
297 | */ | ||
298 | int | ||
299 | xfs_sync_attr( | ||
300 | struct xfs_mount *mp, | ||
301 | int flags) | ||
302 | { | ||
303 | ASSERT((flags & ~SYNC_WAIT) == 0); | ||
304 | |||
305 | return xfs_inode_ag_iterator(mp, xfs_sync_inode_attr, flags, | ||
306 | XFS_ICI_NO_TAG); | ||
206 | } | 307 | } |
207 | 308 | ||
208 | STATIC int | 309 | STATIC int |
@@ -252,7 +353,7 @@ xfs_sync_fsdata(
252 | * If this is xfssyncd() then only sync the superblock if we can | 353 | * If this is xfssyncd() then only sync the superblock if we can |
253 | * lock it without sleeping and it is not pinned. | 354 | * lock it without sleeping and it is not pinned. |
254 | */ | 355 | */ |
255 | if (flags & SYNC_BDFLUSH) { | 356 | if (flags & SYNC_TRYLOCK) { |
256 | ASSERT(!(flags & SYNC_WAIT)); | 357 | ASSERT(!(flags & SYNC_WAIT)); |
257 | 358 | ||
258 | bp = xfs_getsb(mp, XFS_BUF_TRYLOCK); | 359 | bp = xfs_getsb(mp, XFS_BUF_TRYLOCK); |
@@ -316,13 +417,13 @@ xfs_quiesce_data(
316 | int error; | 417 | int error; |
317 | 418 | ||
318 | /* push non-blocking */ | 419 | /* push non-blocking */ |
319 | xfs_sync_inodes(mp, SYNC_DELWRI|SYNC_BDFLUSH); | 420 | xfs_sync_data(mp, 0); |
320 | XFS_QM_DQSYNC(mp, SYNC_BDFLUSH); | 421 | xfs_qm_sync(mp, SYNC_TRYLOCK); |
321 | xfs_filestream_flush(mp); | 422 | xfs_filestream_flush(mp); |
322 | 423 | ||
323 | /* push and block */ | 424 | /* push and block */ |
324 | xfs_sync_inodes(mp, SYNC_DELWRI|SYNC_WAIT|SYNC_IOWAIT); | 425 | xfs_sync_data(mp, SYNC_WAIT); |
325 | XFS_QM_DQSYNC(mp, SYNC_WAIT); | 426 | xfs_qm_sync(mp, SYNC_WAIT); |
326 | 427 | ||
327 | /* write superblock and hoover up shutdown errors */ | 428 | /* write superblock and hoover up shutdown errors */ |
328 | error = xfs_sync_fsdata(mp, 0); | 429 | error = xfs_sync_fsdata(mp, 0); |
@@ -341,7 +442,7 @@ xfs_quiesce_fs(
341 | int count = 0, pincount; | 442 | int count = 0, pincount; |
342 | 443 | ||
343 | xfs_flush_buftarg(mp->m_ddev_targp, 0); | 444 | xfs_flush_buftarg(mp->m_ddev_targp, 0); |
344 | xfs_reclaim_inodes(mp, 0, XFS_IFLUSH_DELWRI_ELSE_ASYNC); | 445 | xfs_reclaim_inodes(mp, XFS_IFLUSH_DELWRI_ELSE_ASYNC); |
345 | 446 | ||
346 | /* | 447 | /* |
347 | * This loop must run at least twice. The first instance of the loop | 448 | * This loop must run at least twice. The first instance of the loop |
@@ -350,7 +451,7 @@ xfs_quiesce_fs(
350 | * logged before we can write the unmount record. | 451 | * logged before we can write the unmount record. |
351 | */ | 452 | */ |
352 | do { | 453 | do { |
353 | xfs_sync_inodes(mp, SYNC_ATTR|SYNC_WAIT); | 454 | xfs_sync_attr(mp, SYNC_WAIT); |
354 | pincount = xfs_flush_buftarg(mp->m_ddev_targp, 1); | 455 | pincount = xfs_flush_buftarg(mp->m_ddev_targp, 1); |
355 | if (!pincount) { | 456 | if (!pincount) { |
356 | delay(50); | 457 | delay(50); |
@@ -433,8 +534,8 @@ xfs_flush_inodes_work(
433 | void *arg) | 534 | void *arg) |
434 | { | 535 | { |
435 | struct inode *inode = arg; | 536 | struct inode *inode = arg; |
436 | xfs_sync_inodes(mp, SYNC_DELWRI | SYNC_TRYLOCK); | 537 | xfs_sync_data(mp, SYNC_TRYLOCK); |
437 | xfs_sync_inodes(mp, SYNC_DELWRI | SYNC_TRYLOCK | SYNC_IOWAIT); | 538 | xfs_sync_data(mp, SYNC_TRYLOCK | SYNC_WAIT); |
438 | iput(inode); | 539 | iput(inode); |
439 | } | 540 | } |
440 | 541 | ||
@@ -465,10 +566,10 @@ xfs_sync_worker(
465 | 566 | ||
466 | if (!(mp->m_flags & XFS_MOUNT_RDONLY)) { | 567 | if (!(mp->m_flags & XFS_MOUNT_RDONLY)) { |
467 | xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE); | 568 | xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE); |
468 | xfs_reclaim_inodes(mp, 0, XFS_IFLUSH_DELWRI_ELSE_ASYNC); | 569 | xfs_reclaim_inodes(mp, XFS_IFLUSH_DELWRI_ELSE_ASYNC); |
469 | /* dgc: errors ignored here */ | 570 | /* dgc: errors ignored here */ |
470 | error = XFS_QM_DQSYNC(mp, SYNC_BDFLUSH); | 571 | error = xfs_qm_sync(mp, SYNC_TRYLOCK); |
471 | error = xfs_sync_fsdata(mp, SYNC_BDFLUSH); | 572 | error = xfs_sync_fsdata(mp, SYNC_TRYLOCK); |
472 | if (xfs_log_need_covered(mp)) | 573 | if (xfs_log_need_covered(mp)) |
473 | error = xfs_commit_dummy_trans(mp, XFS_LOG_FORCE); | 574 | error = xfs_commit_dummy_trans(mp, XFS_LOG_FORCE); |
474 | } | 575 | } |
@@ -569,7 +670,7 @@ xfs_reclaim_inode(
569 | xfs_ifunlock(ip); | 670 | xfs_ifunlock(ip); |
570 | xfs_iunlock(ip, XFS_ILOCK_EXCL); | 671 | xfs_iunlock(ip, XFS_ILOCK_EXCL); |
571 | } | 672 | } |
572 | return 1; | 673 | return -EAGAIN; |
573 | } | 674 | } |
574 | __xfs_iflags_set(ip, XFS_IRECLAIM); | 675 | __xfs_iflags_set(ip, XFS_IRECLAIM); |
575 | spin_unlock(&ip->i_flags_lock); | 676 | spin_unlock(&ip->i_flags_lock); |
@@ -654,101 +755,27 @@ xfs_inode_clear_reclaim_tag(
654 | xfs_put_perag(mp, pag); | 755 | xfs_put_perag(mp, pag); |
655 | } | 756 | } |
656 | 757 | ||
657 | 758 | STATIC int | |
658 | STATIC void | 759 | xfs_reclaim_inode_now( |
659 | xfs_reclaim_inodes_ag( | 760 | struct xfs_inode *ip, |
660 | xfs_mount_t *mp, | 761 | struct xfs_perag *pag, |
661 | int ag, | 762 | int flags) |
662 | int noblock, | ||
663 | int mode) | ||
664 | { | 763 | { |
665 | xfs_inode_t *ip = NULL; | 764 | /* ignore if already under reclaim */ |
666 | xfs_perag_t *pag = &mp->m_perag[ag]; | 765 | if (xfs_iflags_test(ip, XFS_IRECLAIM)) { |
667 | int nr_found; | ||
668 | uint32_t first_index; | ||
669 | int skipped; | ||
670 | |||
671 | restart: | ||
672 | first_index = 0; | ||
673 | skipped = 0; | ||
674 | do { | ||
675 | /* | ||
676 | * use a gang lookup to find the next inode in the tree | ||
677 | * as the tree is sparse and a gang lookup walks to find | ||
678 | * the number of objects requested. | ||
679 | */ | ||
680 | read_lock(&pag->pag_ici_lock); | ||
681 | nr_found = radix_tree_gang_lookup_tag(&pag->pag_ici_root, | ||
682 | (void**)&ip, first_index, 1, | ||
683 | XFS_ICI_RECLAIM_TAG); | ||
684 | |||
685 | if (!nr_found) { | ||
686 | read_unlock(&pag->pag_ici_lock); | ||
687 | break; | ||
688 | } | ||
689 | |||
690 | /* | ||
691 | * Update the index for the next lookup. Catch overflows | ||
692 | * into the next AG range which can occur if we have inodes | ||
693 | * in the last block of the AG and we are currently | ||
694 | * pointing to the last inode. | ||
695 | */ | ||
696 | first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1); | ||
697 | if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino)) { | ||
698 | read_unlock(&pag->pag_ici_lock); | ||
699 | break; | ||
700 | } | ||
701 | |||
702 | /* ignore if already under reclaim */ | ||
703 | if (xfs_iflags_test(ip, XFS_IRECLAIM)) { | ||
704 | read_unlock(&pag->pag_ici_lock); | ||
705 | continue; | ||
706 | } | ||
707 | |||
708 | if (noblock) { | ||
709 | if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) { | ||
710 | read_unlock(&pag->pag_ici_lock); | ||
711 | continue; | ||
712 | } | ||
713 | if (xfs_ipincount(ip) || | ||
714 | !xfs_iflock_nowait(ip)) { | ||
715 | xfs_iunlock(ip, XFS_ILOCK_EXCL); | ||
716 | read_unlock(&pag->pag_ici_lock); | ||
717 | continue; | ||
718 | } | ||
719 | } | ||
720 | read_unlock(&pag->pag_ici_lock); | 766 | read_unlock(&pag->pag_ici_lock); |
721 | 767 | return 0; | |
722 | /* | ||
723 | * hmmm - this is an inode already in reclaim. Do | ||
724 | * we even bother catching it here? | ||
725 | */ | ||
726 | if (xfs_reclaim_inode(ip, noblock, mode)) | ||
727 | skipped++; | ||
728 | } while (nr_found); | ||
729 | |||
730 | if (skipped) { | ||
731 | delay(1); | ||
732 | goto restart; | ||
733 | } | 768 | } |
734 | return; | 769 | read_unlock(&pag->pag_ici_lock); |
735 | 770 | ||
771 | return xfs_reclaim_inode(ip, 0, flags); | ||
736 | } | 772 | } |
737 | 773 | ||
738 | int | 774 | int |
739 | xfs_reclaim_inodes( | 775 | xfs_reclaim_inodes( |
740 | xfs_mount_t *mp, | 776 | xfs_mount_t *mp, |
741 | int noblock, | ||
742 | int mode) | 777 | int mode) |
743 | { | 778 | { |
744 | int i; | 779 | return xfs_inode_ag_iterator(mp, xfs_reclaim_inode_now, mode, |
745 | 780 | XFS_ICI_RECLAIM_TAG); | |
746 | for (i = 0; i < mp->m_sb.sb_agcount; i++) { | ||
747 | if (!mp->m_perag[i].pag_ici_init) | ||
748 | continue; | ||
749 | xfs_reclaim_inodes_ag(mp, i, noblock, mode); | ||
750 | } | ||
751 | return 0; | ||
752 | } | 781 | } |
753 | |||
754 | |||
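
The core of this patch is the callback-driven AG walk: xfs_inode_ag_iterator() loops over every initialised allocation group, xfs_inode_ag_walk() hands each inode it finds to an execute() callback, EAGAIN from the callback means "skipped, retry the whole AG after a short delay", and EFSCORRUPTED aborts the walk. The same walker then backs xfs_sync_data(), xfs_sync_attr() and xfs_reclaim_inodes(); they differ only in the callback passed and in the radix-tree tag used (XFS_ICI_NO_TAG versus XFS_ICI_RECLAIM_TAG). The sketch below is a simplified userspace model of that control flow, not kernel code: every type and helper in it (model_inode, model_ag, sync_one, walk_ag, iterate_ags, MODEL_EFSCORRUPTED) is an illustrative stand-in for the XFS structures and functions named in the diff above.

/*
 * Simplified userspace model of the control flow introduced by this patch:
 * xfs_inode_ag_iterator() -> xfs_inode_ag_walk() -> execute() callback.
 * All names below are illustrative stand-ins, not kernel APIs.
 */
#include <errno.h>
#include <stdio.h>

#define MODEL_EFSCORRUPTED	990	/* stand-in for XFS's EFSCORRUPTED */

struct model_inode { int busy; int synced; };
struct model_ag { struct model_inode inodes[3]; int ninodes; };

/* The "execute" callback: returning EAGAIN means "skipped, retry this AG". */
static int sync_one(struct model_inode *ip, int flags)
{
	if (ip->busy) {
		ip->busy = 0;			/* will succeed on the retry pass */
		return EAGAIN;
	}
	ip->synced = 1;
	return 0;
}

/* Mirrors xfs_inode_ag_walk(): restart the AG if any inode was skipped. */
static int walk_ag(struct model_ag *ag,
		   int (*execute)(struct model_inode *, int), int flags)
{
	int last_error = 0;
	int skipped;

restart:
	skipped = 0;
	for (int i = 0; i < ag->ninodes; i++) {
		int error = execute(&ag->inodes[i], flags);
		if (error == EAGAIN) {
			skipped++;
			continue;
		}
		if (error)
			last_error = error;
		if (error == MODEL_EFSCORRUPTED)
			break;			/* bail out if the fs is corrupted */
	}
	if (skipped)
		goto restart;			/* the kernel walker delay(1)s first */
	return last_error;
}

/* Mirrors xfs_inode_ag_iterator(): apply the walk to every AG in turn. */
static int iterate_ags(struct model_ag *ags, int nags,
		       int (*execute)(struct model_inode *, int), int flags)
{
	int last_error = 0;

	for (int ag = 0; ag < nags; ag++) {
		int error = walk_ag(&ags[ag], execute, flags);
		if (error) {
			last_error = error;
			if (error == MODEL_EFSCORRUPTED)
				break;
		}
	}
	return last_error;
}

int main(void)
{
	struct model_ag ags[2] = {
		{ .inodes = { {1, 0}, {0, 0}, {0, 0} }, .ninodes = 3 },
		{ .inodes = { {0, 0}, {1, 0}, {0, 0} }, .ninodes = 3 },
	};

	int error = iterate_ags(ags, 2, sync_one, 0);
	printf("iterate_ags returned %d\n", error);
	return 0;
}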