author		Dave Chinner <david@fromorbit.com>	2014-08-03 23:54:46 -0400
committer	Dave Chinner <david@fromorbit.com>	2014-08-03 23:54:46 -0400
commit		b076d8720d793cde04b75b4941b8774e209649b4 (patch)
tree		14f0aa5ac5850e2077076062340eb1ef15f7ccf1 /fs/xfs
parent		4d7eece2c0dad832c5f224629eba3cced3f2d6cd (diff)
parent		1e773c4989d2dfe08332b4c18f7e1d7ad633015c (diff)
Merge branch 'xfs-bulkstat-refactor' into for-next
Diffstat (limited to 'fs/xfs')
-rw-r--r--	fs/xfs/xfs_ioctl.c	4
-rw-r--r--	fs/xfs/xfs_ioctl32.c	2
-rw-r--r--	fs/xfs/xfs_itable.c	569
-rw-r--r--	fs/xfs/xfs_itable.h	23
4 files changed, 283 insertions(+), 315 deletions(-)
diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
index 30983b8ceaa1..494237ed4a65 100644
--- a/fs/xfs/xfs_ioctl.c
+++ b/fs/xfs/xfs_ioctl.c
@@ -796,8 +796,8 @@ xfs_ioc_bulkstat(
 		error = xfs_inumbers(mp, &inlast, &count,
 					bulkreq.ubuffer, xfs_inumbers_fmt);
 	else if (cmd == XFS_IOC_FSBULKSTAT_SINGLE)
-		error = xfs_bulkstat_single(mp, &inlast,
-						bulkreq.ubuffer, &done);
+		error = xfs_bulkstat_one(mp, inlast, bulkreq.ubuffer,
+					sizeof(xfs_bstat_t), NULL, &done);
 	else	/* XFS_IOC_FSBULKSTAT */
 		error = xfs_bulkstat(mp, &inlast, &count, xfs_bulkstat_one,
 					sizeof(xfs_bstat_t), bulkreq.ubuffer,
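The xfs_bulkstat_single() call dropped here is deleted outright in the xfs_itable.c hunks below; XFS_IOC_FSBULKSTAT_SINGLE is now serviced directly by xfs_bulkstat_one(). The ioctl ABI itself is untouched. For reference, a minimal bulkstat consumer might look like the sketch below — illustrative only, not part of the patch, assuming the xfsprogs headers supply struct xfs_fsop_bulkreq, struct xfs_bstat and XFS_IOC_FSBULKSTAT:

/*
 * Illustrative XFS_IOC_FSBULKSTAT consumer; not part of the patch.
 * Build against the xfsprogs headers and run on an XFS mount point.
 */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <xfs/xfs.h>	/* struct xfs_fsop_bulkreq, struct xfs_bstat */

int main(int argc, char **argv)
{
	struct xfs_bstat	bstat[64];
	__u64			lastino = 0;	/* resume cookie, 0 = start */
	__s32			ocount = 0;
	struct xfs_fsop_bulkreq	req = {
		.lastip  = &lastino,
		.icount  = 64,			/* slots in bstat[] */
		.ubuffer = bstat,
		.ocount  = &ocount,
	};
	int			fd;

	if (argc != 2)
		return 1;
	fd = open(argv[1], O_RDONLY | O_DIRECTORY);
	if (fd < 0)
		return 1;
	/* The kernel advances lastino, so each call resumes after the last. */
	while (ioctl(fd, XFS_IOC_FSBULKSTAT, &req) == 0 && ocount > 0) {
		for (int i = 0; i < ocount; i++)
			printf("ino %llu size %lld\n",
			       (unsigned long long)bstat[i].bs_ino,
			       (long long)bstat[i].bs_size);
	}
	close(fd);
	return 0;
}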
diff --git a/fs/xfs/xfs_ioctl32.c b/fs/xfs/xfs_ioctl32.c
index e65ea67e3ae3..cf63418bf05f 100644
--- a/fs/xfs/xfs_ioctl32.c
+++ b/fs/xfs/xfs_ioctl32.c
@@ -102,7 +102,7 @@ xfs_compat_growfs_rt_copyin(
 STATIC int
 xfs_inumbers_fmt_compat(
 	void			__user *ubuffer,
-	const xfs_inogrp_t	*buffer,
+	const struct xfs_inogrp	*buffer,
 	long			count,
 	long			*written)
 {
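The body of this formatter (only its prototype changes here) exists because the i386 ABI lays out struct xfs_inogrp differently than x86_64 does: 64-bit fields need only 4-byte alignment on i386, so xi_allocmask lands four bytes earlier and the kernel cannot copy the native struct straight to a 32-bit caller. A hedged standalone illustration of the layout mismatch, with both structs declared locally rather than pulled from kernel headers:

/*
 * Standalone illustration of why a compat formatter is needed; the
 * struct definitions are local approximations, not the kernel's own.
 */
#include <stdio.h>
#include <stdint.h>

struct native_inogrp {		/* 64-bit layout: padding after xi_alloccount */
	uint64_t	xi_startino;
	int32_t		xi_alloccount;
	uint64_t	xi_allocmask;
};

struct compat_inogrp {		/* i386-style layout: packed, no padding */
	uint64_t	xi_startino;
	int32_t		xi_alloccount;
	uint64_t	xi_allocmask;
} __attribute__((packed));

int main(void)
{
	/* Typically prints 24 vs 20: a straight copy to userspace would skew. */
	printf("native %zu bytes, compat %zu bytes\n",
	       sizeof(struct native_inogrp), sizeof(struct compat_inogrp));
	return 0;
}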
diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c
index 7e54992bcae9..f71be9c68017 100644
--- a/fs/xfs/xfs_itable.c
+++ b/fs/xfs/xfs_itable.c
@@ -76,10 +76,8 @@ xfs_bulkstat_one_int(
 	error = xfs_iget(mp, NULL, ino,
 			 (XFS_IGET_DONTCACHE | XFS_IGET_UNTRUSTED),
 			 XFS_ILOCK_SHARED, &ip);
-	if (error) {
-		*stat = BULKSTAT_RV_NOTHING;
+	if (error)
 		goto out_free;
-	}
 
 	ASSERT(ip != NULL);
 	ASSERT(ip->i_imap.im_blkno != 0);
@@ -136,7 +134,6 @@ xfs_bulkstat_one_int(
 	IRELE(ip);
 
 	error = formatter(buffer, ubsize, ubused, buf);
-
 	if (!error)
 		*stat = BULKSTAT_RV_DIDONE;
 
@@ -175,9 +172,170 @@ xfs_bulkstat_one(
 				    xfs_bulkstat_one_fmt, ubused, stat);
 }
 
+/*
+ * Loop over all clusters in a chunk for a given incore inode allocation btree
+ * record.  Do a readahead if there are any allocated inodes in that cluster.
+ */
+STATIC void
+xfs_bulkstat_ichunk_ra(
+	struct xfs_mount		*mp,
+	xfs_agnumber_t			agno,
+	struct xfs_inobt_rec_incore	*irec)
+{
+	xfs_agblock_t			agbno;
+	struct blk_plug			plug;
+	int				blks_per_cluster;
+	int				inodes_per_cluster;
+	int				i;	/* inode chunk index */
+
+	agbno = XFS_AGINO_TO_AGBNO(mp, irec->ir_startino);
+	blks_per_cluster = xfs_icluster_size_fsb(mp);
+	inodes_per_cluster = blks_per_cluster << mp->m_sb.sb_inopblog;
+
+	blk_start_plug(&plug);
+	for (i = 0; i < XFS_INODES_PER_CHUNK;
+	     i += inodes_per_cluster, agbno += blks_per_cluster) {
+		if (xfs_inobt_maskn(i, inodes_per_cluster) & ~irec->ir_free) {
+			xfs_btree_reada_bufs(mp, agno, agbno, blks_per_cluster,
+					     &xfs_inode_buf_ops);
+		}
+	}
+	blk_finish_plug(&plug);
+}
+
+/*
+ * Lookup the inode chunk that the given inode lives in and then get the record
+ * if we found the chunk.  If the inode was not the last in the chunk and there
+ * are some left allocated, update the data for the pointed-to record as well as
+ * return the count of grabbed inodes.
+ */
+STATIC int
+xfs_bulkstat_grab_ichunk(
+	struct xfs_btree_cur		*cur,	/* btree cursor */
+	xfs_agino_t			agino,	/* starting inode of chunk */
+	int				*icount,/* return # of inodes grabbed */
+	struct xfs_inobt_rec_incore	*irec)	/* btree record */
+{
+	int				idx;	/* index into inode chunk */
+	int				stat;
+	int				error = 0;
+
+	/* Lookup the inode chunk that this inode lives in */
+	error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &stat);
+	if (error)
+		return error;
+	if (!stat) {
+		*icount = 0;
+		return error;
+	}
+
+	/* Get the record, should always work */
+	error = xfs_inobt_get_rec(cur, irec, &stat);
+	if (error)
+		return error;
+	XFS_WANT_CORRUPTED_RETURN(stat == 1);
+
+	/* Check if the record contains the inode in request */
+	if (irec->ir_startino + XFS_INODES_PER_CHUNK <= agino)
+		return -EINVAL;
+
+	idx = agino - irec->ir_startino + 1;
+	if (idx < XFS_INODES_PER_CHUNK &&
+	    (xfs_inobt_maskn(idx, XFS_INODES_PER_CHUNK - idx) & ~irec->ir_free)) {
+		int	i;
+
+		/* We got a right chunk with some left inodes allocated at it.
+		 * Grab the chunk record.  Mark all the uninteresting inodes
+		 * free -- because they're before our start point.
+		 */
+		for (i = 0; i < idx; i++) {
+			if (XFS_INOBT_MASK(i) & ~irec->ir_free)
+				irec->ir_freecount++;
+		}
+
+		irec->ir_free |= xfs_inobt_maskn(0, idx);
+		*icount = XFS_INODES_PER_CHUNK - irec->ir_freecount;
+	}
+
+	return 0;
+}
+
 #define XFS_BULKSTAT_UBLEFT(ubleft)	((ubleft) >= statstruct_size)
 
 /*
+ * Process inodes in chunk with a pointer to a formatter function
+ * that will iget the inode and fill in the appropriate structure.
+ */
+int
+xfs_bulkstat_ag_ichunk(
+	struct xfs_mount		*mp,
+	xfs_agnumber_t			agno,
+	struct xfs_inobt_rec_incore	*irbp,
+	bulkstat_one_pf			formatter,
+	size_t				statstruct_size,
+	struct xfs_bulkstat_agichunk	*acp)
+{
+	xfs_ino_t			lastino = acp->ac_lastino;
+	char				__user **ubufp = acp->ac_ubuffer;
+	int				ubleft = acp->ac_ubleft;
+	int				ubelem = acp->ac_ubelem;
+	int				chunkidx, clustidx;
+	int				error = 0;
+	xfs_agino_t			agino;
+
+	for (agino = irbp->ir_startino, chunkidx = clustidx = 0;
+	     XFS_BULKSTAT_UBLEFT(ubleft) &&
+	     irbp->ir_freecount < XFS_INODES_PER_CHUNK;
+	     chunkidx++, clustidx++, agino++) {
+		int		fmterror;	/* bulkstat formatter result */
+		int		ubused;
+		xfs_ino_t	ino = XFS_AGINO_TO_INO(mp, agno, agino);
+
+		ASSERT(chunkidx < XFS_INODES_PER_CHUNK);
+
+		/* Skip if this inode is free */
+		if (XFS_INOBT_MASK(chunkidx) & irbp->ir_free) {
+			lastino = ino;
+			continue;
+		}
+
+		/*
+		 * Count used inodes as free so we can tell when the
+		 * chunk is used up.
+		 */
+		irbp->ir_freecount++;
+
+		/* Get the inode and fill in a single buffer */
+		ubused = statstruct_size;
+		error = formatter(mp, ino, *ubufp, ubleft, &ubused, &fmterror);
+		if (fmterror == BULKSTAT_RV_NOTHING) {
+			if (error && error != -ENOENT && error != -EINVAL) {
+				ubleft = 0;
+				break;
+			}
+			lastino = ino;
+			continue;
+		}
+		if (fmterror == BULKSTAT_RV_GIVEUP) {
+			ubleft = 0;
+			ASSERT(error);
+			break;
+		}
+		if (*ubufp)
+			*ubufp += ubused;
+		ubleft -= ubused;
+		ubelem++;
+		lastino = ino;
+	}
+
+	acp->ac_lastino = lastino;
+	acp->ac_ubleft = ubleft;
+	acp->ac_ubelem = ubelem;
+
+	return error;
+}
+
+/*
  * Return stat information in bulk (by-inode) for the filesystem.
  */
 int					/* error status */
@@ -190,13 +348,10 @@ xfs_bulkstat(
 	char		__user *ubuffer, /* buffer with inode stats */
 	int		*done)	/* 1 if there are more stats to get */
 {
-	xfs_agblock_t	agbno=0;/* allocation group block number */
 	xfs_buf_t	*agbp;	/* agi header buffer */
 	xfs_agi_t	*agi;	/* agi header data */
 	xfs_agino_t	agino;	/* inode # in allocation group */
 	xfs_agnumber_t	agno;	/* allocation group number */
-	int		chunkidx; /* current index into inode chunk */
-	int		clustidx; /* current index into inode cluster */
 	xfs_btree_cur_t	*cur;	/* btree cursor for ialloc btree */
 	int		end_of_ag; /* set if we've seen the ag end */
 	int		error;	/* error code */
@@ -209,8 +364,6 @@ xfs_bulkstat(
 	xfs_inobt_rec_incore_t	*irbuf;	/* start of irec buffer */
 	xfs_inobt_rec_incore_t	*irbufend; /* end of good irec buffer entries */
 	xfs_ino_t		lastino; /* last inode number returned */
-	int			blks_per_cluster; /* # of blocks per cluster */
-	int			inodes_per_cluster;/* # of inodes per cluster */
 	int			nirbuf;	/* size of irbuf */
 	int			rval;	/* return value error code */
 	int			tmp;	/* result value from btree calls */
@@ -218,7 +371,6 @@ xfs_bulkstat(
 	int			ubleft;	/* bytes left in user's buffer */
 	char			__user *ubufp; /* pointer into user's buffer */
 	int			ubelem;	/* spaces used in user's buffer */
-	int			ubused;	/* bytes used by formatter */
 
 	/*
 	 * Get the last inode value, see if there's nothing to do.
@@ -233,17 +385,13 @@ xfs_bulkstat(
 		*ubcountp = 0;
 		return 0;
 	}
-	if (!ubcountp || *ubcountp <= 0) {
-		return -EINVAL;
-	}
+
 	ubcount = *ubcountp; /* statstruct's */
 	ubleft = ubcount * statstruct_size; /* bytes */
 	*ubcountp = ubelem = 0;
 	*done = 0;
 	fmterror = 0;
 	ubufp = ubuffer;
-	blks_per_cluster = xfs_icluster_size_fsb(mp);
-	inodes_per_cluster = blks_per_cluster << mp->m_sb.sb_inopblog;
 	irbuf = kmem_zalloc_greedy(&irbsize, PAGE_SIZE, PAGE_SIZE * 4);
 	if (!irbuf)
 		return -ENOMEM;
@@ -258,14 +406,8 @@ xfs_bulkstat(
 	while (XFS_BULKSTAT_UBLEFT(ubleft) && agno < mp->m_sb.sb_agcount) {
 		cond_resched();
 		error = xfs_ialloc_read_agi(mp, NULL, agno, &agbp);
-		if (error) {
-			/*
-			 * Skip this allocation group and go to the next one.
-			 */
-			agno++;
-			agino = 0;
-			continue;
-		}
+		if (error)
+			break;
 		agi = XFS_BUF_TO_AGI(agbp);
 		/*
 		 * Allocate and initialize a btree cursor for ialloc btree.
@@ -275,96 +417,39 @@ xfs_bulkstat(
 		irbp = irbuf;
 		irbufend = irbuf + nirbuf;
 		end_of_ag = 0;
-		/*
-		 * If we're returning in the middle of an allocation group,
-		 * we need to get the remainder of the chunk we're in.
-		 */
+		icount = 0;
 		if (agino > 0) {
-			xfs_inobt_rec_incore_t r;
-
 			/*
-			 * Lookup the inode chunk that this inode lives in.
+			 * In the middle of an allocation group, we need to get
+			 * the remainder of the chunk we're in.
 			 */
-			error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE,
-						 &tmp);
-			if (!error &&	/* no I/O error */
-			    tmp &&	/* lookup succeeded */
-					/* got the record, should always work */
-			    !(error = xfs_inobt_get_rec(cur, &r, &i)) &&
-			    i == 1 &&
-					/* this is the right chunk */
-			    agino < r.ir_startino + XFS_INODES_PER_CHUNK &&
-					/* lastino was not last in chunk */
-			    (chunkidx = agino - r.ir_startino + 1) <
-				    XFS_INODES_PER_CHUNK &&
-					/* there are some left allocated */
-			    xfs_inobt_maskn(chunkidx,
-				    XFS_INODES_PER_CHUNK - chunkidx) &
-				    ~r.ir_free) {
-				/*
-				 * Grab the chunk record.  Mark all the
-				 * uninteresting inodes (because they're
-				 * before our start point) free.
-				 */
-				for (i = 0; i < chunkidx; i++) {
-					if (XFS_INOBT_MASK(i) & ~r.ir_free)
-						r.ir_freecount++;
-				}
-				r.ir_free |= xfs_inobt_maskn(0, chunkidx);
+			struct xfs_inobt_rec_incore	r;
+
+			error = xfs_bulkstat_grab_ichunk(cur, agino, &icount, &r);
+			if (error)
+				break;
+			if (icount) {
 				irbp->ir_startino = r.ir_startino;
 				irbp->ir_freecount = r.ir_freecount;
 				irbp->ir_free = r.ir_free;
 				irbp++;
 				agino = r.ir_startino + XFS_INODES_PER_CHUNK;
-				icount = XFS_INODES_PER_CHUNK - r.ir_freecount;
-			} else {
-				/*
-				 * If any of those tests failed, bump the
-				 * inode number (just in case).
-				 */
-				agino++;
-				icount = 0;
 			}
-			/*
-			 * In any case, increment to the next record.
-			 */
-			if (!error)
-				error = xfs_btree_increment(cur, 0, &tmp);
+			/* Increment to the next record */
+			error = xfs_btree_increment(cur, 0, &tmp);
 		} else {
-			/*
-			 * Start of ag.  Lookup the first inode chunk.
-			 */
+			/* Start of ag.  Lookup the first inode chunk */
 			error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &tmp);
-			icount = 0;
 		}
+		if (error)
+			break;
+
 		/*
 		 * Loop through inode btree records in this ag,
 		 * until we run out of inodes or space in the buffer.
 		 */
 		while (irbp < irbufend && icount < ubcount) {
-			xfs_inobt_rec_incore_t r;
-
-			/*
-			 * Loop as long as we're unable to read the
-			 * inode btree.
-			 */
-			while (error) {
-				agino += XFS_INODES_PER_CHUNK;
-				if (XFS_AGINO_TO_AGBNO(mp, agino) >=
-						be32_to_cpu(agi->agi_length))
-					break;
-				error = xfs_inobt_lookup(cur, agino,
-							 XFS_LOOKUP_GE, &tmp);
-				cond_resched();
-			}
-			/*
-			 * If ran off the end of the ag either with an error,
-			 * or the normal way, set end and stop collecting.
-			 */
-			if (error) {
-				end_of_ag = 1;
-				break;
-			}
+			struct xfs_inobt_rec_incore	r;
 
 			error = xfs_inobt_get_rec(cur, &r, &i);
 			if (error || i == 0) {
@@ -377,25 +462,7 @@ xfs_bulkstat(
 			 * Also start read-ahead now for this chunk.
 			 */
 			if (r.ir_freecount < XFS_INODES_PER_CHUNK) {
-				struct blk_plug	plug;
-				/*
-				 * Loop over all clusters in the next chunk.
-				 * Do a readahead if there are any allocated
-				 * inodes in that cluster.
-				 */
-				blk_start_plug(&plug);
-				agbno = XFS_AGINO_TO_AGBNO(mp, r.ir_startino);
-				for (chunkidx = 0;
-				     chunkidx < XFS_INODES_PER_CHUNK;
-				     chunkidx += inodes_per_cluster,
-				     agbno += blks_per_cluster) {
-					if (xfs_inobt_maskn(chunkidx,
-					    inodes_per_cluster) & ~r.ir_free)
-						xfs_btree_reada_bufs(mp, agno,
-							agbno, blks_per_cluster,
-							&xfs_inode_buf_ops);
-				}
-				blk_finish_plug(&plug);
+				xfs_bulkstat_ichunk_ra(mp, agno, &r);
 				irbp->ir_startino = r.ir_startino;
 				irbp->ir_freecount = r.ir_freecount;
 				irbp->ir_free = r.ir_free;
@@ -422,57 +489,20 @@ xfs_bulkstat(
 		irbufend = irbp;
 		for (irbp = irbuf;
 		     irbp < irbufend && XFS_BULKSTAT_UBLEFT(ubleft); irbp++) {
-			/*
-			 * Now process this chunk of inodes.
-			 */
-			for (agino = irbp->ir_startino, chunkidx = clustidx = 0;
-			     XFS_BULKSTAT_UBLEFT(ubleft) &&
-			     irbp->ir_freecount < XFS_INODES_PER_CHUNK;
-			     chunkidx++, clustidx++, agino++) {
-				ASSERT(chunkidx < XFS_INODES_PER_CHUNK);
-
-				ino = XFS_AGINO_TO_INO(mp, agno, agino);
-				/*
-				 * Skip if this inode is free.
-				 */
-				if (XFS_INOBT_MASK(chunkidx) & irbp->ir_free) {
-					lastino = ino;
-					continue;
-				}
-				/*
-				 * Count used inodes as free so we can tell
-				 * when the chunk is used up.
-				 */
-				irbp->ir_freecount++;
-
-				/*
-				 * Get the inode and fill in a single buffer.
-				 */
-				ubused = statstruct_size;
-				error = formatter(mp, ino, ubufp, ubleft,
-						  &ubused, &fmterror);
-				if (fmterror == BULKSTAT_RV_NOTHING) {
-					if (error && error != -ENOENT &&
-					    error != -EINVAL) {
-						ubleft = 0;
-						rval = error;
-						break;
-					}
-					lastino = ino;
-					continue;
-				}
-				if (fmterror == BULKSTAT_RV_GIVEUP) {
-					ubleft = 0;
-					ASSERT(error);
-					rval = error;
-					break;
-				}
-				if (ubufp)
-					ubufp += ubused;
-				ubleft -= ubused;
-				ubelem++;
-				lastino = ino;
-			}
+			struct xfs_bulkstat_agichunk ac;
+
+			ac.ac_lastino = lastino;
+			ac.ac_ubuffer = &ubuffer;
+			ac.ac_ubleft = ubleft;
+			ac.ac_ubelem = ubelem;
+			error = xfs_bulkstat_ag_ichunk(mp, agno, irbp,
+					formatter, statstruct_size, &ac);
+			if (error)
+				rval = error;
+
+			lastino = ac.ac_lastino;
+			ubleft = ac.ac_ubleft;
+			ubelem = ac.ac_ubelem;
 
 			cond_resched();
 		}
@@ -512,58 +542,10 @@ xfs_bulkstat(
 	return rval;
 }
 
-/*
- * Return stat information in bulk (by-inode) for the filesystem.
- * Special case for non-sequential one inode bulkstat.
- */
-int					/* error status */
-xfs_bulkstat_single(
-	xfs_mount_t		*mp,	/* mount point for filesystem */
-	xfs_ino_t		*lastinop, /* inode to return */
-	char			__user *buffer, /* buffer with inode stats */
-	int			*done)	/* 1 if there are more stats to get */
-{
-	int			count;	/* count value for bulkstat call */
-	int			error;	/* return value */
-	xfs_ino_t		ino;	/* filesystem inode number */
-	int			res;	/* result from bs1 */
-
-	/*
-	 * note that requesting valid inode numbers which are not allocated
-	 * to inodes will most likely cause xfs_imap_to_bp to generate warning
-	 * messages about bad magic numbers. This is ok. The fact that
-	 * the inode isn't actually an inode is handled by the
-	 * error check below. Done this way to make the usual case faster
-	 * at the expense of the error case.
-	 */
-
-	ino = *lastinop;
-	error = xfs_bulkstat_one(mp, ino, buffer, sizeof(xfs_bstat_t),
-				 NULL, &res);
-	if (error) {
-		/*
-		 * Special case way failed, do it the "long" way
-		 * to see if that works.
-		 */
-		(*lastinop)--;
-		count = 1;
-		if (xfs_bulkstat(mp, lastinop, &count, xfs_bulkstat_one,
-				 sizeof(xfs_bstat_t), buffer, done))
-			return error;
-		if (count == 0 || (xfs_ino_t)*lastinop != ino)
-			return error == -EFSCORRUPTED ?
-				EINVAL : error;
-		else
-			return 0;
-	}
-	*done = 0;
-	return 0;
-}
-
 int
 xfs_inumbers_fmt(
 	void			__user *ubuffer,	/* buffer to write to */
-	const xfs_inogrp_t	*buffer,	/* buffer to read from */
+	const struct xfs_inogrp	*buffer,	/* buffer to read from */
 	long			count,		/* # of elements to read */
 	long			*written)	/* # of bytes written */
 {
@@ -578,127 +560,104 @@ xfs_inumbers_fmt(
  */
 int					/* error status */
 xfs_inumbers(
-	xfs_mount_t	*mp,		/* mount point for filesystem */
-	xfs_ino_t	*lastino,	/* last inode returned */
-	int		*count,		/* size of buffer/count returned */
+	struct xfs_mount	*mp,	/* mount point for filesystem */
+	xfs_ino_t		*lastino,/* last inode returned */
+	int			*count,	/* size of buffer/count returned */
 	void		__user *ubuffer,/* buffer with inode descriptions */
 	inumbers_fmt_pf	formatter)
 {
-	xfs_buf_t	*agbp;
-	xfs_agino_t	agino;
-	xfs_agnumber_t	agno;
-	int		bcount;
-	xfs_inogrp_t	*buffer;
-	int		bufidx;
-	xfs_btree_cur_t	*cur;
-	int		error;
-	xfs_inobt_rec_incore_t r;
-	int		i;
-	xfs_ino_t	ino;
-	int		left;
-	int		tmp;
-
-	ino = (xfs_ino_t)*lastino;
-	agno = XFS_INO_TO_AGNO(mp, ino);
-	agino = XFS_INO_TO_AGINO(mp, ino);
-	left = *count;
+	xfs_agnumber_t		agno = XFS_INO_TO_AGNO(mp, *lastino);
+	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, *lastino);
+	struct xfs_btree_cur	*cur = NULL;
+	struct xfs_buf		*agbp = NULL;
+	struct xfs_inogrp	*buffer;
+	int			bcount;
+	int			left = *count;
+	int			bufidx = 0;
+	int			error = 0;
+
 	*count = 0;
+	if (agno >= mp->m_sb.sb_agcount ||
+	    *lastino != XFS_AGINO_TO_INO(mp, agno, agino))
+		return error;
+
 	bcount = MIN(left, (int)(PAGE_SIZE / sizeof(*buffer)));
 	buffer = kmem_alloc(bcount * sizeof(*buffer), KM_SLEEP);
-	error = bufidx = 0;
-	cur = NULL;
-	agbp = NULL;
-	while (left > 0 && agno < mp->m_sb.sb_agcount) {
-		if (agbp == NULL) {
+	do {
+		struct xfs_inobt_rec_incore	r;
+		int				stat;
+
+		if (!agbp) {
 			error = xfs_ialloc_read_agi(mp, NULL, agno, &agbp);
-			if (error) {
-				/*
-				 * If we can't read the AGI of this ag,
-				 * then just skip to the next one.
-				 */
-				ASSERT(cur == NULL);
-				agbp = NULL;
-				agno++;
-				agino = 0;
-				continue;
-			}
+			if (error)
+				break;
+
 			cur = xfs_inobt_init_cursor(mp, NULL, agbp, agno,
 						    XFS_BTNUM_INO);
 			error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_GE,
-						 &tmp);
-			if (error) {
-				xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
-				cur = NULL;
-				xfs_buf_relse(agbp);
-				agbp = NULL;
-				/*
-				 * Move up the last inode in the current
-				 * chunk.  The lookup_ge will always get
-				 * us the first inode in the next chunk.
-				 */
-				agino += XFS_INODES_PER_CHUNK - 1;
-				continue;
-			}
+						 &stat);
+			if (error)
+				break;
+			if (!stat)
+				goto next_ag;
 		}
-		error = xfs_inobt_get_rec(cur, &r, &i);
-		if (error || i == 0) {
-			xfs_buf_relse(agbp);
-			agbp = NULL;
-			xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
-			cur = NULL;
-			agno++;
-			agino = 0;
-			continue;
-		}
+
+		error = xfs_inobt_get_rec(cur, &r, &stat);
+		if (error)
+			break;
+		if (!stat)
+			goto next_ag;
+
 		agino = r.ir_startino + XFS_INODES_PER_CHUNK - 1;
 		buffer[bufidx].xi_startino =
 			XFS_AGINO_TO_INO(mp, agno, r.ir_startino);
 		buffer[bufidx].xi_alloccount =
 			XFS_INODES_PER_CHUNK - r.ir_freecount;
 		buffer[bufidx].xi_allocmask = ~r.ir_free;
-		bufidx++;
-		left--;
-		if (bufidx == bcount) {
-			long written;
-			if (formatter(ubuffer, buffer, bufidx, &written)) {
-				error = -EFAULT;
+		if (++bufidx == bcount) {
+			long	written;
+
+			error = formatter(ubuffer, buffer, bufidx, &written);
+			if (error)
 				break;
-			}
 			ubuffer += written;
 			*count += bufidx;
 			bufidx = 0;
 		}
-		if (left) {
-			error = xfs_btree_increment(cur, 0, &tmp);
-			if (error) {
-				xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
-				cur = NULL;
-				xfs_buf_relse(agbp);
-				agbp = NULL;
-				/*
-				 * The agino value has already been bumped.
-				 * Just try to skip up to it.
-				 */
-				agino += XFS_INODES_PER_CHUNK;
-				continue;
-			}
-		}
-	}
+		if (!--left)
+			break;
+
+		error = xfs_btree_increment(cur, 0, &stat);
+		if (error)
+			break;
+		if (stat)
+			continue;
+
+next_ag:
+		xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
+		cur = NULL;
+		xfs_buf_relse(agbp);
+		agbp = NULL;
+		agino = 0;
+	} while (++agno < mp->m_sb.sb_agcount);
+
 	if (!error) {
 		if (bufidx) {
 			long	written;
-			if (formatter(ubuffer, buffer, bufidx, &written))
-				error = -EFAULT;
-			else
+
+			error = formatter(ubuffer, buffer, bufidx, &written);
+			if (!error)
 				*count += bufidx;
 		}
 		*lastino = XFS_AGINO_TO_INO(mp, agno, agino);
 	}
+
 	kmem_free(buffer);
 	if (cur)
 		xfs_btree_del_cursor(cur, (error ? XFS_BTREE_ERROR :
 					   XFS_BTREE_NOERROR));
 	if (agbp)
 		xfs_buf_relse(agbp);
+
 	return error;
 }
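The rewritten xfs_inumbers() backs the XFS_IOC_FSINUMBERS ioctl, which walks inode chunks rather than individual inodes; each struct xfs_inogrp returned carries a chunk's starting inode, allocated count and allocation bitmask. A minimal consumer sketch in the same spirit as the bulkstat one above — illustrative only, assuming the xfsprogs headers:

/* Illustrative XFS_IOC_FSINUMBERS consumer; not part of the patch. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <xfs/xfs.h>	/* struct xfs_fsop_bulkreq, struct xfs_inogrp */

int main(int argc, char **argv)
{
	struct xfs_inogrp	igrp[32];
	__u64			lastino = 0;
	__s32			ocount = 0;
	struct xfs_fsop_bulkreq	req = {
		.lastip  = &lastino,
		.icount  = 32,
		.ubuffer = igrp,
		.ocount  = &ocount,
	};
	int			fd;

	if (argc != 2)
		return 1;
	fd = open(argv[1], O_RDONLY | O_DIRECTORY);
	if (fd < 0)
		return 1;
	while (ioctl(fd, XFS_IOC_FSINUMBERS, &req) == 0 && ocount > 0) {
		for (int i = 0; i < ocount; i++)
			printf("chunk at ino %llu: %d allocated, mask %#llx\n",
			       (unsigned long long)igrp[i].xi_startino,
			       (int)igrp[i].xi_alloccount,
			       (unsigned long long)igrp[i].xi_allocmask);
	}
	close(fd);
	return 0;
}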
diff --git a/fs/xfs/xfs_itable.h b/fs/xfs/xfs_itable.h
index 97295d91d170..aaed08022eb9 100644
--- a/fs/xfs/xfs_itable.h
+++ b/fs/xfs/xfs_itable.h
@@ -30,6 +30,22 @@ typedef int (*bulkstat_one_pf)(struct xfs_mount *mp,
 			       int		*ubused,
 			       int		*stat);
 
+struct xfs_bulkstat_agichunk {
+	xfs_ino_t	ac_lastino;	/* last inode returned */
+	char		__user **ac_ubuffer;/* pointer into user's buffer */
+	int		ac_ubleft;	/* bytes left in user's buffer */
+	int		ac_ubelem;	/* spaces used in user's buffer */
+};
+
+int
+xfs_bulkstat_ag_ichunk(
+	struct xfs_mount		*mp,
+	xfs_agnumber_t			agno,
+	struct xfs_inobt_rec_incore	*irbp,
+	bulkstat_one_pf			formatter,
+	size_t				statstruct_size,
+	struct xfs_bulkstat_agichunk	*acp);
+
 /*
  * Values for stat return value.
  */
@@ -50,13 +66,6 @@ xfs_bulkstat(
 	char		__user *ubuffer,/* buffer with inode stats */
 	int		*done);		/* 1 if there are more stats to get */
 
-int
-xfs_bulkstat_single(
-	xfs_mount_t		*mp,
-	xfs_ino_t		*lastinop,
-	char			__user *buffer,
-	int			*done);
-
 typedef int (*bulkstat_one_fmt_pf)(	/* used size in bytes or negative error */
 	void		__user *ubuffer, /* buffer to write to */
 	int		ubsize,		 /* remaining user buffer sz */
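A closing note on the bitmask arithmetic in xfs_bulkstat_grab_ichunk() above: ir_free carries one bit per inode in a 64-inode chunk, set when the inode is free, and resuming mid-chunk works by recounting everything before the resume point as free so that only the tail of the chunk is processed. The following standalone model reproduces that trimming step in userspace; all names and constants are local to the sketch, not kernel interfaces:

/*
 * Userspace model of the partial-chunk trimming done by
 * xfs_bulkstat_grab_ichunk(); helpers and constants are local
 * to this sketch, not kernel interfaces.
 */
#include <stdint.h>
#include <stdio.h>

#define INODES_PER_CHUNK	64

/* n contiguous bits set, starting at bit i (cf. xfs_inobt_maskn()) */
static uint64_t maskn(int i, int n)
{
	uint64_t mask = (n == 64) ? ~0ULL : ((1ULL << n) - 1);
	return mask << i;
}

int main(void)
{
	uint64_t ir_free = maskn(0, 8);	/* inodes 0-7 free, 8-63 allocated */
	int ir_freecount = 8;
	int agino_offset = 19;		/* last inode returned, chunk-relative */
	int idx = agino_offset + 1;	/* first inode we still want */
	int icount = 0;

	/* Any allocated inodes left at or after idx? */
	if (idx < INODES_PER_CHUNK &&
	    (maskn(idx, INODES_PER_CHUNK - idx) & ~ir_free)) {
		/* Count the allocated-but-uninteresting inodes as free ... */
		for (int i = 0; i < idx; i++) {
			if ((1ULL << i) & ~ir_free)
				ir_freecount++;
		}
		/* ... and mark them free so the walk skips them. */
		ir_free |= maskn(0, idx);
		icount = INODES_PER_CHUNK - ir_freecount;
	}

	/* 8 originally free + 12 trimmed = 20 "free"; 44 remain to stat */
	printf("freecount %d, icount %d\n", ir_freecount, icount);
	return 0;
}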