Diffstat (limited to 'fs/xfs/xfs_itable.c')
-rw-r--r--	fs/xfs/xfs_itable.c	184
1 file changed, 117 insertions, 67 deletions
diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c
index 46249e4d1fea..7775ddc0b3c6 100644
--- a/fs/xfs/xfs_itable.c
+++ b/fs/xfs/xfs_itable.c
@@ -39,6 +39,16 @@
 #include "xfs_error.h"
 #include "xfs_btree.h"
 
+int
+xfs_internal_inum(
+	xfs_mount_t	*mp,
+	xfs_ino_t	ino)
+{
+	return (ino == mp->m_sb.sb_rbmino || ino == mp->m_sb.sb_rsumino ||
+		(XFS_SB_VERSION_HASQUOTA(&mp->m_sb) &&
+		 (ino == mp->m_sb.sb_uquotino || ino == mp->m_sb.sb_gquotino)));
+}
+
 STATIC int
 xfs_bulkstat_one_iget(
 	xfs_mount_t	*mp,		/* mount point for filesystem */
@@ -52,7 +62,8 @@ xfs_bulkstat_one_iget(
 	bhv_vnode_t	*vp;
 	int		error;
 
-	error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_SHARED, &ip, bno);
+	error = xfs_iget(mp, NULL, ino,
+			 XFS_IGET_BULKSTAT, XFS_ILOCK_SHARED, &ip, bno);
 	if (error) {
 		*stat = BULKSTAT_RV_NOTHING;
 		return error;
@@ -212,17 +223,12 @@ xfs_bulkstat_one(
 	xfs_dinode_t	*dip;		/* dinode inode pointer */
 
 	dip = (xfs_dinode_t *)dibuff;
+	*stat = BULKSTAT_RV_NOTHING;
 
-	if (!buffer || ino == mp->m_sb.sb_rbmino || ino == mp->m_sb.sb_rsumino ||
-	    (XFS_SB_VERSION_HASQUOTA(&mp->m_sb) &&
-	     (ino == mp->m_sb.sb_uquotino || ino == mp->m_sb.sb_gquotino))) {
-		*stat = BULKSTAT_RV_NOTHING;
+	if (!buffer || xfs_internal_inum(mp, ino))
 		return XFS_ERROR(EINVAL);
-	}
-	if (ubsize < sizeof(*buf)) {
-		*stat = BULKSTAT_RV_NOTHING;
+	if (ubsize < sizeof(*buf))
 		return XFS_ERROR(ENOMEM);
-	}
 
 	buf = kmem_alloc(sizeof(*buf), KM_SLEEP);
 
@@ -238,8 +244,7 @@ xfs_bulkstat_one(
 	}
 
 	if (copy_to_user(buffer, buf, sizeof(*buf))) {
-		*stat = BULKSTAT_RV_NOTHING;
 		error = EFAULT;
 		goto out_free;
 	}
 
@@ -253,6 +258,46 @@ xfs_bulkstat_one(
 }
 
 /*
+ * Test to see whether we can use the ondisk inode directly, based
+ * on the given bulkstat flags, filling in dipp accordingly.
+ * Returns zero if the inode is dodgey.
+ */
+STATIC int
+xfs_bulkstat_use_dinode(
+	xfs_mount_t	*mp,
+	int		flags,
+	xfs_buf_t	*bp,
+	int		clustidx,
+	xfs_dinode_t	**dipp)
+{
+	xfs_dinode_t	*dip;
+	unsigned int	aformat;
+
+	*dipp = NULL;
+	if (!bp || (flags & BULKSTAT_FG_IGET))
+		return 1;
+	dip = (xfs_dinode_t *)
+		xfs_buf_offset(bp, clustidx << mp->m_sb.sb_inodelog);
+	if (INT_GET(dip->di_core.di_magic, ARCH_CONVERT) != XFS_DINODE_MAGIC ||
+	    !XFS_DINODE_GOOD_VERSION(
+			INT_GET(dip->di_core.di_version, ARCH_CONVERT)))
+		return 0;
+	if (flags & BULKSTAT_FG_QUICK) {
+		*dipp = dip;
+		return 1;
+	}
+	/* BULKSTAT_FG_INLINE: if attr fork is local, or not there, use it */
+	aformat = INT_GET(dip->di_core.di_aformat, ARCH_CONVERT);
+	if ((XFS_CFORK_Q(&dip->di_core) == 0) ||
+	    (aformat == XFS_DINODE_FMT_LOCAL) ||
+	    (aformat == XFS_DINODE_FMT_EXTENTS && !dip->di_core.di_anextents)) {
+		*dipp = dip;
+		return 1;
+	}
+	return 1;
+}
+
+/*
  * Return stat information in bulk (by-inode) for the filesystem.
  */
 int					/* error status */
@@ -284,10 +329,11 @@ xfs_bulkstat(
 	xfs_agino_t	gino;	/* current btree rec's start inode */
 	int		i;	/* loop index */
 	int		icount;	/* count of inodes good in irbuf */
+	size_t		irbsize; /* size of irec buffer in bytes */
 	xfs_ino_t	ino;	/* inode number (filesystem) */
-	xfs_inobt_rec_t	*irbp;	/* current irec buffer pointer */
-	xfs_inobt_rec_t	*irbuf;	/* start of irec buffer */
-	xfs_inobt_rec_t	*irbufend; /* end of good irec buffer entries */
+	xfs_inobt_rec_incore_t *irbp; /* current irec buffer pointer */
+	xfs_inobt_rec_incore_t *irbuf; /* start of irec buffer */
+	xfs_inobt_rec_incore_t *irbufend; /* end of good irec buffer entries */
 	xfs_ino_t	lastino=0; /* last inode number returned */
 	int		nbcluster; /* # of blocks in a cluster */
 	int		nicluster; /* # of inodes in a cluster */
@@ -328,13 +374,10 @@ xfs_bulkstat(
 		(XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog);
 	nimask = ~(nicluster - 1);
 	nbcluster = nicluster >> mp->m_sb.sb_inopblog;
-	/*
-	 * Allocate a page-sized buffer for inode btree records.
-	 * We could try allocating something smaller, but for normal
-	 * calls we'll always (potentially) need the whole page.
-	 */
-	irbuf = kmem_alloc(NBPC, KM_SLEEP);
-	nirbuf = NBPC / sizeof(*irbuf);
+	irbuf = kmem_zalloc_greedy(&irbsize, NBPC, NBPC * 4,
+				   KM_SLEEP | KM_MAYFAIL | KM_LARGE);
+	nirbuf = irbsize / sizeof(*irbuf);
+
 	/*
 	 * Loop over the allocation groups, starting from the last
 	 * inode returned; 0 means start of the allocation group.
@@ -358,7 +401,7 @@ xfs_bulkstat(
 		 * Allocate and initialize a btree cursor for ialloc btree.
 		 */
 		cur = xfs_btree_init_cursor(mp, NULL, agbp, agno, XFS_BTNUM_INO,
-			(xfs_inode_t *)0, 0);
+						(xfs_inode_t *)0, 0);
 		irbp = irbuf;
 		irbufend = irbuf + nirbuf;
 		end_of_ag = 0;
@@ -395,9 +438,9 @@ xfs_bulkstat(
 					gcnt++;
 			}
 			gfree |= XFS_INOBT_MASKN(0, chunkidx);
-			INT_SET(irbp->ir_startino, ARCH_CONVERT, gino);
-			INT_SET(irbp->ir_freecount, ARCH_CONVERT, gcnt);
-			INT_SET(irbp->ir_free, ARCH_CONVERT, gfree);
+			irbp->ir_startino = gino;
+			irbp->ir_freecount = gcnt;
+			irbp->ir_free = gfree;
 			irbp++;
 			agino = gino + XFS_INODES_PER_CHUNK;
 			icount = XFS_INODES_PER_CHUNK - gcnt;
@@ -451,11 +494,27 @@ xfs_bulkstat(
 			}
 			/*
 			 * If this chunk has any allocated inodes, save it.
+			 * Also start read-ahead now for this chunk.
 			 */
 			if (gcnt < XFS_INODES_PER_CHUNK) {
-				INT_SET(irbp->ir_startino, ARCH_CONVERT, gino);
-				INT_SET(irbp->ir_freecount, ARCH_CONVERT, gcnt);
-				INT_SET(irbp->ir_free, ARCH_CONVERT, gfree);
+				/*
+				 * Loop over all clusters in the next chunk.
+				 * Do a readahead if there are any allocated
+				 * inodes in that cluster.
+				 */
+				for (agbno = XFS_AGINO_TO_AGBNO(mp, gino),
+				     chunkidx = 0;
+				     chunkidx < XFS_INODES_PER_CHUNK;
+				     chunkidx += nicluster,
+				     agbno += nbcluster) {
+					if (XFS_INOBT_MASKN(chunkidx,
+							    nicluster) & ~gfree)
+						xfs_btree_reada_bufs(mp, agno,
+							agbno, nbcluster);
+				}
+				irbp->ir_startino = gino;
+				irbp->ir_freecount = gcnt;
+				irbp->ir_free = gfree;
 				irbp++;
 				icount += XFS_INODES_PER_CHUNK - gcnt;
 			}
@@ -479,33 +538,11 @@ xfs_bulkstat(
 		for (irbp = irbuf;
 		     irbp < irbufend && ubleft >= statstruct_size; irbp++) {
 			/*
-			 * Read-ahead the next chunk's worth of inodes.
-			 */
-			if (&irbp[1] < irbufend) {
-				/*
-				 * Loop over all clusters in the next chunk.
-				 * Do a readahead if there are any allocated
-				 * inodes in that cluster.
-				 */
-				for (agbno = XFS_AGINO_TO_AGBNO(mp,
-					INT_GET(irbp[1].ir_startino, ARCH_CONVERT)),
-				     chunkidx = 0;
-				     chunkidx < XFS_INODES_PER_CHUNK;
-				     chunkidx += nicluster,
-				     agbno += nbcluster) {
-					if (XFS_INOBT_MASKN(chunkidx,
-						nicluster) &
-					    ~(INT_GET(irbp[1].ir_free, ARCH_CONVERT)))
-						xfs_btree_reada_bufs(mp, agno,
-							agbno, nbcluster);
-				}
-			}
-			/*
 			 * Now process this chunk of inodes.
 			 */
-			for (agino = INT_GET(irbp->ir_startino, ARCH_CONVERT), chunkidx = 0, clustidx = 0;
+			for (agino = irbp->ir_startino, chunkidx = clustidx = 0;
 			     ubleft > 0 &&
-			     INT_GET(irbp->ir_freecount, ARCH_CONVERT) < XFS_INODES_PER_CHUNK;
+			     irbp->ir_freecount < XFS_INODES_PER_CHUNK;
 			     chunkidx++, clustidx++, agino++) {
 				ASSERT(chunkidx < XFS_INODES_PER_CHUNK);
 				/*
@@ -525,11 +562,12 @@ xfs_bulkstat(
 				 */
 				if ((chunkidx & (nicluster - 1)) == 0) {
 					agbno = XFS_AGINO_TO_AGBNO(mp,
-							INT_GET(irbp->ir_startino, ARCH_CONVERT)) +
+							irbp->ir_startino) +
 						((chunkidx & nimask) >>
 						 mp->m_sb.sb_inopblog);
 
-					if (flags & BULKSTAT_FG_QUICK) {
+					if (flags & (BULKSTAT_FG_QUICK |
+						     BULKSTAT_FG_INLINE)) {
 						ino = XFS_AGINO_TO_INO(mp, agno,
 								       agino);
 						bno = XFS_AGB_TO_DADDR(mp, agno,
@@ -543,6 +581,7 @@ xfs_bulkstat(
 							KM_SLEEP);
 					ip->i_ino = ino;
 					ip->i_mount = mp;
+					spin_lock_init(&ip->i_flags_lock);
 					if (bp)
 						xfs_buf_relse(bp);
 					error = xfs_itobp(mp, NULL, ip,
@@ -564,30 +603,34 @@ xfs_bulkstat(
 				/*
 				 * Skip if this inode is free.
 				 */
-				if (XFS_INOBT_MASK(chunkidx) & INT_GET(irbp->ir_free, ARCH_CONVERT))
+				if (XFS_INOBT_MASK(chunkidx) & irbp->ir_free)
 					continue;
 				/*
 				 * Count used inodes as free so we can tell
 				 * when the chunk is used up.
 				 */
-				INT_MOD(irbp->ir_freecount, ARCH_CONVERT, +1);
+				irbp->ir_freecount++;
 				ino = XFS_AGINO_TO_INO(mp, agno, agino);
 				bno = XFS_AGB_TO_DADDR(mp, agno, agbno);
-				if (flags & BULKSTAT_FG_QUICK) {
-					dip = (xfs_dinode_t *)xfs_buf_offset(bp,
-					      (clustidx << mp->m_sb.sb_inodelog));
-
-					if (INT_GET(dip->di_core.di_magic, ARCH_CONVERT)
-						    != XFS_DINODE_MAGIC
-					    || !XFS_DINODE_GOOD_VERSION(
-						    INT_GET(dip->di_core.di_version, ARCH_CONVERT)))
-						continue;
+				if (!xfs_bulkstat_use_dinode(mp, flags, bp,
+							     clustidx, &dip))
+					continue;
+				/*
+				 * If we need to do an iget, cannot hold bp.
+				 * Drop it, until starting the next cluster.
+				 */
+				if ((flags & BULKSTAT_FG_INLINE) && !dip) {
+					if (bp)
+						xfs_buf_relse(bp);
+					bp = NULL;
 				}
 
 				/*
 				 * Get the inode and fill in a single buffer.
 				 * BULKSTAT_FG_QUICK uses dip to fill it in.
 				 * BULKSTAT_FG_IGET uses igets.
+				 * BULKSTAT_FG_INLINE uses dip if we have an
+				 * inline attr fork, else igets.
 				 * See: xfs_bulkstat_one & xfs_dm_bulkstat_one.
 				 * This is also used to count inodes/blks, etc
 				 * in xfs_qm_quotacheck.
@@ -597,8 +640,15 @@ xfs_bulkstat(
 						ubleft, private_data,
 						bno, &ubused, dip, &fmterror);
 				if (fmterror == BULKSTAT_RV_NOTHING) {
-					if (error == ENOMEM)
+					if (error == EFAULT) {
+						ubleft = 0;
+						rval = error;
+						break;
+					}
+					else if (error == ENOMEM)
 						ubleft = 0;
+					else
+						lastino = ino;
 					continue;
 				}
 				if (fmterror == BULKSTAT_RV_GIVEUP) {
@@ -633,7 +683,7 @@ xfs_bulkstat(
 	/*
 	 * Done, we're either out of filesystem or space to put the data.
 	 */
-	kmem_free(irbuf, NBPC);
+	kmem_free(irbuf, irbsize);
 	*ubcountp = ubelem;
 	if (agno >= mp->m_sb.sb_agcount) {
 		/*