author		Jie Liu <jeff.liu@oracle.com>	2014-07-24 04:42:21 -0400
committer	Dave Chinner <david@fromorbit.com>	2014-07-24 04:42:21 -0400
commit		f3d1e587437b784635459dcfccaedd17149282cc
tree		43e21998feafaf14452c98aea6aac443fa904596
parent		4b8fdfecd84528e044a6ca32242de641b203995b
xfs: introduce xfs_bulkstat_grab_ichunk
From: Jie Liu <jeff.liu@oracle.com>

Introduce xfs_bulkstat_grab_ichunk() to look up the inode chunk in which
the given inode resides and grab that record. If the inode was not the
last in the chunk and some inodes after it are still allocated, update
the data for the pointed-to record and return the count of grabbed
inodes on success. Refactor xfs_bulkstat() to use it.

Signed-off-by: Jie Liu <jeff.liu@oracle.com>
Reviewed-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Dave Chinner <david@fromorbit.com>
-rw-r--r--	fs/xfs/xfs_itable.c	119
1 file changed, 69 insertions(+), 50 deletions(-)
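
For illustration only, the sketch below mimics in userspace the mask arithmetic the new helper performs: inodes in the chunk that precede the requested starting index are folded into the free mask, so only the allocated inodes from that index onward are counted. It is not the kernel code; INODES_PER_CHUNK, maskn() and grab_count() are simplified stand-ins for XFS_INODES_PER_CHUNK, xfs_inobt_maskn() and xfs_bulkstat_grab_ichunk().

/*
 * Illustrative userspace sketch, not kernel code: count the allocated
 * inodes at or after a starting index within a 64-inode chunk, treating
 * the earlier inodes as free, the way xfs_bulkstat_grab_ichunk() does.
 */
#include <stdint.h>
#include <stdio.h>

#define INODES_PER_CHUNK	64

/* Bitmask covering n inodes starting at index i. */
static uint64_t maskn(int i, int n)
{
	return (n >= 64 ? ~0ULL : (1ULL << n) - 1) << i;
}

/*
 * Fold the inodes before idx into the free mask and free count, then
 * return how many allocated inodes remain at or after idx.
 */
static int grab_count(uint64_t *free, int *freecount, int idx)
{
	int	i;

	/* Nothing allocated from idx onwards: nothing to grab. */
	if (idx >= INODES_PER_CHUNK ||
	    !(maskn(idx, INODES_PER_CHUNK - idx) & ~*free))
		return 0;

	/* Treat the uninteresting inodes before idx as free. */
	for (i = 0; i < idx; i++)
		if ((1ULL << i) & ~*free)
			(*freecount)++;
	*free |= maskn(0, idx);

	return INODES_PER_CHUNK - *freecount;
}

int main(void)
{
	uint64_t	free = 0xff00;	/* inodes 8-15 free, rest allocated */
	int		freecount = 8;

	/* Resume in the middle of the chunk, just past inode index 19. */
	printf("grabbed %d inodes\n", grab_count(&free, &freecount, 20));
	return 0;
}

With inodes 8-15 free and a resume point just past index 19, the sketch reports 44 grabbed inodes, i.e. the allocated inodes at indexes 20 through 63.
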
diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c
index b50816789f2b..32cf52ee33bc 100644
--- a/fs/xfs/xfs_itable.c
+++ b/fs/xfs/xfs_itable.c
@@ -203,6 +203,63 @@ xfs_bulkstat_ichunk_ra(
 	blk_finish_plug(&plug);
 }
 
+/*
+ * Lookup the inode chunk that the given inode lives in and then get the record
+ * if we found the chunk.  If the inode was not the last in the chunk and there
+ * are some left allocated, update the data for the pointed-to record as well as
+ * return the count of grabbed inodes.
+ */
+STATIC int
+xfs_bulkstat_grab_ichunk(
+	struct xfs_btree_cur		*cur,	/* btree cursor */
+	xfs_agino_t			agino,	/* starting inode of chunk */
+	int				*icount,/* return # of inodes grabbed */
+	struct xfs_inobt_rec_incore	*irec)	/* btree record */
+{
+	int				idx;	/* index into inode chunk */
+	int				stat;
+	int				error = 0;
+
+	/* Lookup the inode chunk that this inode lives in */
+	error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &stat);
+	if (error)
+		return error;
+	if (!stat) {
+		*icount = 0;
+		return error;
+	}
+
+	/* Get the record, should always work */
+	error = xfs_inobt_get_rec(cur, irec, &stat);
+	if (error)
+		return error;
+	XFS_WANT_CORRUPTED_RETURN(stat == 1);
+
+	/* Check if the record contains the inode in request */
+	if (irec->ir_startino + XFS_INODES_PER_CHUNK <= agino)
+		return -EINVAL;
+
+	idx = agino - irec->ir_startino + 1;
+	if (idx < XFS_INODES_PER_CHUNK &&
+	    (xfs_inobt_maskn(idx, XFS_INODES_PER_CHUNK - idx) & ~irec->ir_free)) {
+		int	i;
+
+		/* We got a right chunk with some left inodes allocated at it.
+		 * Grab the chunk record.  Mark all the uninteresting inodes
+		 * free -- because they're before our start point.
+		 */
+		for (i = 0; i < idx; i++) {
+			if (XFS_INOBT_MASK(i) & ~irec->ir_free)
+				irec->ir_freecount++;
+		}
+
+		irec->ir_free |= xfs_inobt_maskn(0, idx);
+		*icount = XFS_INODES_PER_CHUNK - irec->ir_freecount;
+	}
+
+	return 0;
+}
+
 #define XFS_BULKSTAT_UBLEFT(ubleft)	((ubleft) >= statstruct_size)
 
 /*
@@ -290,67 +347,29 @@ xfs_bulkstat(
 		irbp = irbuf;
 		irbufend = irbuf + nirbuf;
 		end_of_ag = 0;
-		/*
-		 * If we're returning in the middle of an allocation group,
-		 * we need to get the remainder of the chunk we're in.
-		 */
+		icount = 0;
 		if (agino > 0) {
-			xfs_inobt_rec_incore_t r;
-
 			/*
-			 * Lookup the inode chunk that this inode lives in.
+			 * In the middle of an allocation group, we need to get
+			 * the remainder of the chunk we're in.
 			 */
-			error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE,
-						 &tmp);
-			if (!error &&		/* no I/O error */
-			    tmp &&		/* lookup succeeded */
-					/* got the record, should always work */
-			    !(error = xfs_inobt_get_rec(cur, &r, &i)) &&
-			    i == 1 &&
-					/* this is the right chunk */
-			    agino < r.ir_startino + XFS_INODES_PER_CHUNK &&
-					/* lastino was not last in chunk */
-			    (chunkidx = agino - r.ir_startino + 1) <
-					XFS_INODES_PER_CHUNK &&
-					/* there are some left allocated */
-			    xfs_inobt_maskn(chunkidx,
-					    XFS_INODES_PER_CHUNK - chunkidx) &
-			    ~r.ir_free) {
-				/*
-				 * Grab the chunk record.  Mark all the
-				 * uninteresting inodes (because they're
-				 * before our start point) free.
-				 */
-				for (i = 0; i < chunkidx; i++) {
-					if (XFS_INOBT_MASK(i) & ~r.ir_free)
-						r.ir_freecount++;
-				}
-				r.ir_free |= xfs_inobt_maskn(0, chunkidx);
+			struct xfs_inobt_rec_incore r;
+
+			error = xfs_bulkstat_grab_ichunk(cur, agino, &icount, &r);
+			if (error)
+				break;
+			if (icount) {
 				irbp->ir_startino = r.ir_startino;
 				irbp->ir_freecount = r.ir_freecount;
 				irbp->ir_free = r.ir_free;
 				irbp++;
 				agino = r.ir_startino + XFS_INODES_PER_CHUNK;
-				icount = XFS_INODES_PER_CHUNK - r.ir_freecount;
-			} else {
-				/*
-				 * If any of those tests failed, bump the
-				 * inode number (just in case).
-				 */
-				agino++;
-				icount = 0;
 			}
-			/*
-			 * In any case, increment to the next record.
-			 */
-			if (!error)
-				error = xfs_btree_increment(cur, 0, &tmp);
+			/* Increment to the next record */
+			error = xfs_btree_increment(cur, 0, &tmp);
 		} else {
-			/*
-			 * Start of ag.  Lookup the first inode chunk.
-			 */
+			/* Start of ag.  Lookup the first inode chunk */
 			error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &tmp);
-			icount = 0;
 		}
 		if (error)
 			break;