Diffstat (limited to 'fs/xfs/xfs_itable.c')
-rw-r--r--	fs/xfs/xfs_itable.c	119
1 file changed, 65 insertions(+), 54 deletions(-)
diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c
index aeb2d2221c7d..62efab2f3839 100644
--- a/fs/xfs/xfs_itable.c
+++ b/fs/xfs/xfs_itable.c
@@ -39,7 +39,7 @@
39#include "xfs_error.h" 39#include "xfs_error.h"
40#include "xfs_btree.h" 40#include "xfs_btree.h"
41 41
42int 42STATIC int
43xfs_internal_inum( 43xfs_internal_inum(
44 xfs_mount_t *mp, 44 xfs_mount_t *mp,
45 xfs_ino_t ino) 45 xfs_ino_t ino)
@@ -59,6 +59,7 @@ xfs_bulkstat_one_iget(
 {
 	xfs_icdinode_t	*dic;	/* dinode core info pointer */
 	xfs_inode_t	*ip;	/* incore inode pointer */
+	struct inode	*inode;
 	int		error;
 
 	error = xfs_iget(mp, NULL, ino,
@@ -72,6 +73,7 @@ xfs_bulkstat_one_iget(
 	ASSERT(ip->i_imap.im_blkno != 0);
 
 	dic = &ip->i_d;
+	inode = VFS_I(ip);
 
 	/* xfs_iget returns the following without needing
 	 * further change.
@@ -83,16 +85,19 @@ xfs_bulkstat_one_iget(
 	buf->bs_uid = dic->di_uid;
 	buf->bs_gid = dic->di_gid;
 	buf->bs_size = dic->di_size;
+
 	/*
-	 * We are reading the atime from the Linux inode because the
-	 * dinode might not be uptodate.
+	 * We need to read the timestamps from the Linux inode because
+	 * the VFS keeps writing directly into the inode structure instead
+	 * of telling us about the updates.
 	 */
-	buf->bs_atime.tv_sec = VFS_I(ip)->i_atime.tv_sec;
-	buf->bs_atime.tv_nsec = VFS_I(ip)->i_atime.tv_nsec;
-	buf->bs_mtime.tv_sec = dic->di_mtime.t_sec;
-	buf->bs_mtime.tv_nsec = dic->di_mtime.t_nsec;
-	buf->bs_ctime.tv_sec = dic->di_ctime.t_sec;
-	buf->bs_ctime.tv_nsec = dic->di_ctime.t_nsec;
+	buf->bs_atime.tv_sec = inode->i_atime.tv_sec;
+	buf->bs_atime.tv_nsec = inode->i_atime.tv_nsec;
+	buf->bs_mtime.tv_sec = inode->i_mtime.tv_sec;
+	buf->bs_mtime.tv_nsec = inode->i_mtime.tv_nsec;
+	buf->bs_ctime.tv_sec = inode->i_ctime.tv_sec;
+	buf->bs_ctime.tv_nsec = inode->i_ctime.tv_nsec;
+
 	buf->bs_xflags = xfs_ip2xflags(ip);
 	buf->bs_extsize = dic->di_extsize << mp->m_sb.sb_blocklog;
 	buf->bs_extents = dic->di_nextents;
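
The hunk above makes xfs_bulkstat_one_iget() report atime, mtime and ctime from the Linux VFS inode rather than from the XFS dinode core. A minimal standalone sketch of that pattern follows; the helper name is hypothetical and not part of this diff, and it only assumes the types already used here (xfs_bstat_t, xfs_inode_t, VFS_I()):

/*
 * Illustrative helper (not in this patch): copy the timestamps reported
 * by bulkstat from the VFS inode, which is treated as the authoritative
 * source because the VFS updates it directly.
 */
STATIC void
xfs_bulkstat_copy_times(
	xfs_bstat_t	*buf,
	xfs_inode_t	*ip)
{
	struct inode	*inode = VFS_I(ip);

	buf->bs_atime.tv_sec = inode->i_atime.tv_sec;
	buf->bs_atime.tv_nsec = inode->i_atime.tv_nsec;
	buf->bs_mtime.tv_sec = inode->i_mtime.tv_sec;
	buf->bs_mtime.tv_nsec = inode->i_mtime.tv_nsec;
	buf->bs_ctime.tv_sec = inode->i_ctime.tv_sec;
	buf->bs_ctime.tv_nsec = inode->i_ctime.tv_nsec;
}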
@@ -353,9 +358,6 @@ xfs_bulkstat(
 	int			end_of_ag; /* set if we've seen the ag end */
 	int			error;	/* error code */
 	int			fmterror;/* bulkstat formatter result */
-	__int32_t		gcnt;	/* current btree rec's count */
-	xfs_inofree_t		gfree;	/* current btree rec's free mask */
-	xfs_agino_t		gino;	/* current btree rec's start inode */
 	int			i;	/* loop index */
 	int			icount;	/* count of inodes good in irbuf */
 	size_t			irbsize; /* size of irec buffer in bytes */
@@ -442,40 +444,43 @@ xfs_bulkstat(
 		 * we need to get the remainder of the chunk we're in.
 		 */
 		if (agino > 0) {
+			xfs_inobt_rec_incore_t r;
+
 			/*
 			 * Lookup the inode chunk that this inode lives in.
 			 */
-			error = xfs_inobt_lookup_le(cur, agino, 0, 0, &tmp);
+			error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE,
+						 &tmp);
 			if (!error &&	/* no I/O error */
 			    tmp &&	/* lookup succeeded */
 					/* got the record, should always work */
-			    !(error = xfs_inobt_get_rec(cur, &gino, &gcnt,
-				    &gfree, &i)) &&
+			    !(error = xfs_inobt_get_rec(cur, &r, &i)) &&
 			    i == 1 &&
 					/* this is the right chunk */
-			    agino < gino + XFS_INODES_PER_CHUNK &&
+			    agino < r.ir_startino + XFS_INODES_PER_CHUNK &&
 					/* lastino was not last in chunk */
-			    (chunkidx = agino - gino + 1) <
-				    XFS_INODES_PER_CHUNK &&
+			    (chunkidx = agino - r.ir_startino + 1) <
+				    XFS_INODES_PER_CHUNK &&
 					/* there are some left allocated */
 			    xfs_inobt_maskn(chunkidx,
-				    XFS_INODES_PER_CHUNK - chunkidx) & ~gfree) {
+				    XFS_INODES_PER_CHUNK - chunkidx) &
+				    ~r.ir_free) {
 				/*
 				 * Grab the chunk record.  Mark all the
 				 * uninteresting inodes (because they're
 				 * before our start point) free.
 				 */
 				for (i = 0; i < chunkidx; i++) {
-					if (XFS_INOBT_MASK(i) & ~gfree)
-						gcnt++;
+					if (XFS_INOBT_MASK(i) & ~r.ir_free)
+						r.ir_freecount++;
 				}
-				gfree |= xfs_inobt_maskn(0, chunkidx);
-				irbp->ir_startino = gino;
-				irbp->ir_freecount = gcnt;
-				irbp->ir_free = gfree;
+				r.ir_free |= xfs_inobt_maskn(0, chunkidx);
+				irbp->ir_startino = r.ir_startino;
+				irbp->ir_freecount = r.ir_freecount;
+				irbp->ir_free = r.ir_free;
 				irbp++;
-				agino = gino + XFS_INODES_PER_CHUNK;
-				icount = XFS_INODES_PER_CHUNK - gcnt;
+				agino = r.ir_startino + XFS_INODES_PER_CHUNK;
+				icount = XFS_INODES_PER_CHUNK - r.ir_freecount;
 			} else {
 				/*
 				 * If any of those tests failed, bump the
@@ -493,7 +498,7 @@ xfs_bulkstat(
 			/*
 			 * Start of ag.  Lookup the first inode chunk.
 			 */
-			error = xfs_inobt_lookup_ge(cur, 0, 0, 0, &tmp);
+			error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &tmp);
 			icount = 0;
 		}
 		/*
@@ -501,6 +506,8 @@ xfs_bulkstat(
 		 * until we run out of inodes or space in the buffer.
 		 */
 		while (irbp < irbufend && icount < ubcount) {
+			xfs_inobt_rec_incore_t r;
+
 			/*
 			 * Loop as long as we're unable to read the
 			 * inode btree.
@@ -510,51 +517,55 @@ xfs_bulkstat(
 				if (XFS_AGINO_TO_AGBNO(mp, agino) >=
 				    be32_to_cpu(agi->agi_length))
 					break;
-				error = xfs_inobt_lookup_ge(cur, agino, 0, 0,
-						&tmp);
+				error = xfs_inobt_lookup(cur, agino,
+							 XFS_LOOKUP_GE, &tmp);
 				cond_resched();
 			}
 			/*
 			 * If ran off the end of the ag either with an error,
 			 * or the normal way, set end and stop collecting.
 			 */
-			if (error ||
-			    (error = xfs_inobt_get_rec(cur, &gino, &gcnt,
-				    &gfree, &i)) ||
-			    i == 0) {
+			if (error) {
+				end_of_ag = 1;
+				break;
+			}
+
+			error = xfs_inobt_get_rec(cur, &r, &i);
+			if (error || i == 0) {
 				end_of_ag = 1;
 				break;
 			}
+
 			/*
 			 * If this chunk has any allocated inodes, save it.
 			 * Also start read-ahead now for this chunk.
 			 */
-			if (gcnt < XFS_INODES_PER_CHUNK) {
+			if (r.ir_freecount < XFS_INODES_PER_CHUNK) {
 				/*
 				 * Loop over all clusters in the next chunk.
 				 * Do a readahead if there are any allocated
 				 * inodes in that cluster.
 				 */
-				for (agbno = XFS_AGINO_TO_AGBNO(mp, gino),
-				     chunkidx = 0;
+				agbno = XFS_AGINO_TO_AGBNO(mp, r.ir_startino);
+				for (chunkidx = 0;
 				     chunkidx < XFS_INODES_PER_CHUNK;
 				     chunkidx += nicluster,
 				     agbno += nbcluster) {
-					if (xfs_inobt_maskn(chunkidx,
-						nicluster) & ~gfree)
+					if (xfs_inobt_maskn(chunkidx, nicluster)
+							& ~r.ir_free)
 						xfs_btree_reada_bufs(mp, agno,
 							agbno, nbcluster);
 				}
-				irbp->ir_startino = gino;
-				irbp->ir_freecount = gcnt;
-				irbp->ir_free = gfree;
+				irbp->ir_startino = r.ir_startino;
+				irbp->ir_freecount = r.ir_freecount;
+				irbp->ir_free = r.ir_free;
 				irbp++;
-				icount += XFS_INODES_PER_CHUNK - gcnt;
+				icount += XFS_INODES_PER_CHUNK - r.ir_freecount;
 			}
 			/*
 			 * Set agino to after this chunk and bump the cursor.
 			 */
-			agino = gino + XFS_INODES_PER_CHUNK;
+			agino = r.ir_startino + XFS_INODES_PER_CHUNK;
 			error = xfs_btree_increment(cur, 0, &tmp);
 			cond_resched();
 		}
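
The hunks above convert xfs_bulkstat() from the separate gino/gcnt/gfree outputs of xfs_inobt_get_rec() to a single xfs_inobt_rec_incore_t, and replace xfs_inobt_lookup_le()/xfs_inobt_lookup_ge() with xfs_inobt_lookup() taking an explicit XFS_LOOKUP_LE/XFS_LOOKUP_GE direction. A minimal sketch of the new calling convention as used in this diff; the wrapper function itself is illustrative only and not part of the patch:

/*
 * Illustrative only: position the inode-btree cursor at the first chunk
 * record at or after @agino and read it back into @rec.  *@stat is 1 if
 * a record was found, 0 otherwise.
 */
STATIC int
xfs_ichunk_lookup_example(
	xfs_btree_cur_t		*cur,
	xfs_agino_t		agino,
	xfs_inobt_rec_incore_t	*rec,
	int			*stat)
{
	int			error;

	/* one lookup primitive; the search direction is now an argument */
	error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_GE, stat);
	if (error || !*stat)
		return error;

	/*
	 * The whole chunk record comes back in one structure:
	 * rec->ir_startino, rec->ir_freecount and rec->ir_free replace
	 * the old gino/gcnt/gfree triple.
	 */
	return xfs_inobt_get_rec(cur, rec, stat);
}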
@@ -820,9 +831,7 @@ xfs_inumbers(
 	int		bufidx;
 	xfs_btree_cur_t	*cur;
 	int		error;
-	__int32_t	gcnt;
-	xfs_inofree_t	gfree;
-	xfs_agino_t	gino;
+	xfs_inobt_rec_incore_t r;
 	int		i;
 	xfs_ino_t	ino;
 	int		left;
@@ -855,7 +864,8 @@ xfs_inumbers(
 			continue;
 		}
 		cur = xfs_inobt_init_cursor(mp, NULL, agbp, agno);
-		error = xfs_inobt_lookup_ge(cur, agino, 0, 0, &tmp);
+		error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_GE,
+					 &tmp);
 		if (error) {
 			xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
 			cur = NULL;
@@ -870,9 +880,8 @@ xfs_inumbers(
 				continue;
 			}
 		}
-		if ((error = xfs_inobt_get_rec(cur, &gino, &gcnt, &gfree,
-			&i)) ||
-		    i == 0) {
+		error = xfs_inobt_get_rec(cur, &r, &i);
+		if (error || i == 0) {
 			xfs_buf_relse(agbp);
 			agbp = NULL;
 			xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
@@ -881,10 +890,12 @@ xfs_inumbers(
 			agino = 0;
 			continue;
 		}
-		agino = gino + XFS_INODES_PER_CHUNK - 1;
-		buffer[bufidx].xi_startino = XFS_AGINO_TO_INO(mp, agno, gino);
-		buffer[bufidx].xi_alloccount = XFS_INODES_PER_CHUNK - gcnt;
-		buffer[bufidx].xi_allocmask = ~gfree;
+		agino = r.ir_startino + XFS_INODES_PER_CHUNK - 1;
+		buffer[bufidx].xi_startino =
+			XFS_AGINO_TO_INO(mp, agno, r.ir_startino);
+		buffer[bufidx].xi_alloccount =
+			XFS_INODES_PER_CHUNK - r.ir_freecount;
+		buffer[bufidx].xi_allocmask = ~r.ir_free;
 		bufidx++;
 		left--;
 		if (bufidx == bcount) {