| Field | Value | Date |
|---|---|---|
| author | Alex Elder <aelder@sgi.com> | 2009-09-15 22:37:47 -0400 |
| committer | Alex Elder <aelder@sgi.com> | 2009-09-15 22:37:47 -0400 |
| commit | fdec29c5fcd2705d61c1d14a1d4c74be03e9627c (patch) | |
| tree | bcf5d4dd46b4945f3b4bec6b763ff9a9484e4e59 /fs/xfs/xfs_itable.c | |
| parent | 0cb583fd2862f19ea88b02eb307d11c09e51e2f8 (diff) | |
| parent | 9ef96da6ec5e1b4cf7eb8e30852cd88ec7d5fdc0 (diff) | |
Merge branch 'master' of git://oss.sgi.com/xfs/xfs into for-linus
Conflicts:
fs/xfs/linux-2.6/xfs_lrw.c
Diffstat (limited to 'fs/xfs/xfs_itable.c')
| Mode | File | Lines changed |
|---|---|---|
| -rw-r--r-- | fs/xfs/xfs_itable.c | 98 |

1 file changed, 52 insertions(+), 46 deletions(-)
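The diff below comes in via the merged XFS branch and reworks xfs_bulkstat() and xfs_inumbers() to use the consolidated inode-btree helpers: the separate xfs_inobt_lookup_le()/xfs_inobt_lookup_ge() calls become a single xfs_inobt_lookup() that takes an XFS_LOOKUP_LE or XFS_LOOKUP_GE direction, and xfs_inobt_get_rec() now fills one xfs_inobt_rec_incore_t record instead of separate gino/gcnt/gfree output arguments. The first hunk also makes xfs_internal_inum() STATIC. A minimal sketch of the new calling convention, assuming the field names shown in the hunks (the helper name and its out-parameters are illustrative only, not part of the commit):

```c
/*
 * Illustrative sketch only: shows the calling convention used by the new
 * code in the diff below.  The helper name and the stat out-parameter are
 * hypothetical; cursor setup and types come from the XFS headers.
 */
STATIC int
xfs_example_read_chunk(
	xfs_btree_cur_t		*cur,	/* inode btree cursor */
	xfs_agino_t		agino,	/* starting AG inode number */
	xfs_inobt_rec_incore_t	*r,	/* filled with the chunk record */
	int			*stat)	/* 1 if a record was returned */
{
	int	error;

	/* Position the cursor at the first inode chunk at or after agino. */
	error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_GE, stat);
	if (error || !*stat)
		return error;

	/*
	 * Fetch the whole incore record in one call; r->ir_startino,
	 * r->ir_freecount and r->ir_free replace the old gino/gcnt/gfree
	 * scalar output arguments.
	 */
	return xfs_inobt_get_rec(cur, r, stat);
}
```

Callers then read r.ir_startino, r.ir_freecount and r.ir_free directly, which is exactly the substitution the hunks below perform.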
diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c
index aeb2d2221c7d..b68f9107e26c 100644
--- a/fs/xfs/xfs_itable.c
+++ b/fs/xfs/xfs_itable.c
@@ -39,7 +39,7 @@
 #include "xfs_error.h"
 #include "xfs_btree.h"
 
-int
+STATIC int
 xfs_internal_inum(
 	xfs_mount_t	*mp,
 	xfs_ino_t	ino)
@@ -353,9 +353,6 @@ xfs_bulkstat(
 	int			end_of_ag; /* set if we've seen the ag end */
 	int			error;	/* error code */
 	int			fmterror;/* bulkstat formatter result */
-	__int32_t		gcnt;	/* current btree rec's count */
-	xfs_inofree_t		gfree;	/* current btree rec's free mask */
-	xfs_agino_t		gino;	/* current btree rec's start inode */
 	int			i;	/* loop index */
 	int			icount;	/* count of inodes good in irbuf */
 	size_t			irbsize; /* size of irec buffer in bytes */
@@ -442,40 +439,43 @@ xfs_bulkstat(
 		 * we need to get the remainder of the chunk we're in.
 		 */
 		if (agino > 0) {
+			xfs_inobt_rec_incore_t r;
+
 			/*
 			 * Lookup the inode chunk that this inode lives in.
 			 */
-			error = xfs_inobt_lookup_le(cur, agino, 0, 0, &tmp);
+			error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE,
+						 &tmp);
 			if (!error &&	/* no I/O error */
 			    tmp &&	/* lookup succeeded */
 			    /* got the record, should always work */
-			    !(error = xfs_inobt_get_rec(cur, &gino, &gcnt,
-				    &gfree, &i)) &&
+			    !(error = xfs_inobt_get_rec(cur, &r, &i)) &&
 			    i == 1 &&
 			    /* this is the right chunk */
-			    agino < gino + XFS_INODES_PER_CHUNK &&
+			    agino < r.ir_startino + XFS_INODES_PER_CHUNK &&
 			    /* lastino was not last in chunk */
-			    (chunkidx = agino - gino + 1) <
+			    (chunkidx = agino - r.ir_startino + 1) <
 				    XFS_INODES_PER_CHUNK &&
 			    /* there are some left allocated */
 			    xfs_inobt_maskn(chunkidx,
-				    XFS_INODES_PER_CHUNK - chunkidx) & ~gfree) {
+				    XFS_INODES_PER_CHUNK - chunkidx) &
+				    ~r.ir_free) {
 				/*
 				 * Grab the chunk record.  Mark all the
 				 * uninteresting inodes (because they're
 				 * before our start point) free.
 				 */
 				for (i = 0; i < chunkidx; i++) {
-					if (XFS_INOBT_MASK(i) & ~gfree)
-						gcnt++;
+					if (XFS_INOBT_MASK(i) & ~r.ir_free)
+						r.ir_freecount++;
 				}
-				gfree |= xfs_inobt_maskn(0, chunkidx);
-				irbp->ir_startino = gino;
-				irbp->ir_freecount = gcnt;
-				irbp->ir_free = gfree;
+				r.ir_free |= xfs_inobt_maskn(0, chunkidx);
+				irbp->ir_startino = r.ir_startino;
+				irbp->ir_freecount = r.ir_freecount;
+				irbp->ir_free = r.ir_free;
 				irbp++;
-				agino = gino + XFS_INODES_PER_CHUNK;
-				icount = XFS_INODES_PER_CHUNK - gcnt;
+				agino = r.ir_startino + XFS_INODES_PER_CHUNK;
+				icount = XFS_INODES_PER_CHUNK - r.ir_freecount;
 			} else {
 				/*
 				 * If any of those tests failed, bump the
@@ -493,7 +493,7 @@ xfs_bulkstat(
 			/*
 			 * Start of ag.  Lookup the first inode chunk.
 			 */
-			error = xfs_inobt_lookup_ge(cur, 0, 0, 0, &tmp);
+			error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &tmp);
 			icount = 0;
 		}
 		/*
@@ -501,6 +501,8 @@ xfs_bulkstat(
 		 * until we run out of inodes or space in the buffer.
 		 */
 		while (irbp < irbufend && icount < ubcount) {
+			xfs_inobt_rec_incore_t r;
+
 			/*
 			 * Loop as long as we're unable to read the
 			 * inode btree.
@@ -510,51 +512,55 @@ xfs_bulkstat(
 				if (XFS_AGINO_TO_AGBNO(mp, agino) >=
 						be32_to_cpu(agi->agi_length))
 					break;
-				error = xfs_inobt_lookup_ge(cur, agino, 0, 0,
-							    &tmp);
+				error = xfs_inobt_lookup(cur, agino,
+							 XFS_LOOKUP_GE, &tmp);
 				cond_resched();
 			}
 			/*
 			 * If ran off the end of the ag either with an error,
 			 * or the normal way, set end and stop collecting.
 			 */
-			if (error ||
-			    (error = xfs_inobt_get_rec(cur, &gino, &gcnt,
-				    &gfree, &i)) ||
-			    i == 0) {
+			if (error) {
 				end_of_ag = 1;
 				break;
 			}
+
+			error = xfs_inobt_get_rec(cur, &r, &i);
+			if (error || i == 0) {
+				end_of_ag = 1;
+				break;
+			}
+
 			/*
 			 * If this chunk has any allocated inodes, save it.
 			 * Also start read-ahead now for this chunk.
 			 */
-			if (gcnt < XFS_INODES_PER_CHUNK) {
+			if (r.ir_freecount < XFS_INODES_PER_CHUNK) {
 				/*
 				 * Loop over all clusters in the next chunk.
 				 * Do a readahead if there are any allocated
 				 * inodes in that cluster.
 				 */
-				for (agbno = XFS_AGINO_TO_AGBNO(mp, gino),
-				     chunkidx = 0;
+				agbno = XFS_AGINO_TO_AGBNO(mp, r.ir_startino);
+				for (chunkidx = 0;
 				     chunkidx < XFS_INODES_PER_CHUNK;
 				     chunkidx += nicluster,
 				     agbno += nbcluster) {
-					if (xfs_inobt_maskn(chunkidx,
-							nicluster) & ~gfree)
+					if (xfs_inobt_maskn(chunkidx, nicluster)
+							& ~r.ir_free)
 						xfs_btree_reada_bufs(mp, agno,
 							agbno, nbcluster);
 				}
-				irbp->ir_startino = gino;
-				irbp->ir_freecount = gcnt;
-				irbp->ir_free = gfree;
+				irbp->ir_startino = r.ir_startino;
+				irbp->ir_freecount = r.ir_freecount;
+				irbp->ir_free = r.ir_free;
 				irbp++;
-				icount += XFS_INODES_PER_CHUNK - gcnt;
+				icount += XFS_INODES_PER_CHUNK - r.ir_freecount;
 			}
 			/*
 			 * Set agino to after this chunk and bump the cursor.
 			 */
-			agino = gino + XFS_INODES_PER_CHUNK;
+			agino = r.ir_startino + XFS_INODES_PER_CHUNK;
 			error = xfs_btree_increment(cur, 0, &tmp);
 			cond_resched();
 		}
@@ -820,9 +826,7 @@ xfs_inumbers(
 	int		bufidx;
 	xfs_btree_cur_t	*cur;
 	int		error;
-	__int32_t	gcnt;
-	xfs_inofree_t	gfree;
-	xfs_agino_t	gino;
+	xfs_inobt_rec_incore_t r;
 	int		i;
 	xfs_ino_t	ino;
 	int		left;
@@ -855,7 +859,8 @@ xfs_inumbers(
 				continue;
 			}
 			cur = xfs_inobt_init_cursor(mp, NULL, agbp, agno);
-			error = xfs_inobt_lookup_ge(cur, agino, 0, 0, &tmp);
+			error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_GE,
+						 &tmp);
 			if (error) {
 				xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
 				cur = NULL;
@@ -870,9 +875,8 @@ xfs_inumbers(
 				continue;
 			}
 		}
-		if ((error = xfs_inobt_get_rec(cur, &gino, &gcnt, &gfree,
-			&i)) ||
-		    i == 0) {
+		error = xfs_inobt_get_rec(cur, &r, &i);
+		if (error || i == 0) {
 			xfs_buf_relse(agbp);
 			agbp = NULL;
 			xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
@@ -881,10 +885,12 @@ xfs_inumbers(
 			agino = 0;
 			continue;
 		}
-		agino = gino + XFS_INODES_PER_CHUNK - 1;
-		buffer[bufidx].xi_startino = XFS_AGINO_TO_INO(mp, agno, gino);
-		buffer[bufidx].xi_alloccount = XFS_INODES_PER_CHUNK - gcnt;
-		buffer[bufidx].xi_allocmask = ~gfree;
+		agino = r.ir_startino + XFS_INODES_PER_CHUNK - 1;
+		buffer[bufidx].xi_startino =
+			XFS_AGINO_TO_INO(mp, agno, r.ir_startino);
+		buffer[bufidx].xi_alloccount =
+			XFS_INODES_PER_CHUNK - r.ir_freecount;
+		buffer[bufidx].xi_allocmask = ~r.ir_free;
 		bufidx++;
 		left--;
 		if (bufidx == bcount) {
