author	Nathan Scott <nathans@sgi.com>	2006-09-27 21:02:03 -0400
committer	Tim Shimmin <tes@sgi.com>	2006-09-27 21:02:03 -0400
commit	2627509330323efc88b5818065cba737e000de5c (patch)
tree	1305a2809944fb5baf7f0287db6b59687cfed81c /fs
parent	51bdd70681e247184b81c2de61dbc26154511155 (diff)
[XFS] Drop unneeded endian conversion in bulkstat and start readahead for
batches of inode cluster buffers at once, before any blocking reads are
issued.

SGI-PV: 944409
SGI-Modid: xfs-linux-melb:xfs-kern:26606a

Signed-off-by: Nathan Scott <nathans@sgi.com>
Signed-off-by: Tim Shimmin <tes@sgi.com>
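Background on the "drop unneeded endian conversion" part: the gather buffer used by xfs_bulkstat() now holds incore inode-btree records rather than on-disk ones, which is why the cpu_to_be32()/be32_to_cpu()/be64_to_cpu() calls in the hunks below go away. A rough sketch of the two record layouts involved (based on the XFS headers of this era; the exact field typedefs are recalled from memory and may differ slightly):

/* On-disk inode btree record: fields stored big-endian. */
typedef struct xfs_inobt_rec {
	__be32		ir_startino;	/* starting inode number */
	__be32		ir_freecount;	/* count of free inodes */
	__be64		ir_free;	/* free inode bitmask */
} xfs_inobt_rec_t;

/* Incore copy of the same record: CPU-native fields, no byte swapping. */
typedef struct xfs_inobt_rec_incore {
	xfs_agino_t	ir_startino;	/* starting inode number */
	__int32_t	ir_freecount;	/* count of free inodes */
	xfs_inofree_t	ir_free;	/* free inode bitmask */
} xfs_inobt_rec_incore_t;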
Diffstat (limited to 'fs')
-rw-r--r--	fs/xfs/xfs_itable.c	68
1 file changed, 31 insertions(+), 37 deletions(-)
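In outline, the patch moves the inode cluster readahead from the second (formatting) loop into the first (gathering) loop: as each chunk record is collected from the inode btree, readahead is issued for every cluster in that chunk that contains allocated inodes, so the later blocking buffer reads find the I/O for a whole batch already in flight. A minimal sketch of that pattern, condensed from the new hunk below (gino/gcnt/gfree and the cluster geometry variables are set up earlier in xfs_bulkstat()):

	if (gcnt < XFS_INODES_PER_CHUNK) {	/* chunk has allocated inodes */
		/* walk the chunk one inode cluster at a time */
		for (agbno = XFS_AGINO_TO_AGBNO(mp, gino), chunkidx = 0;
		     chunkidx < XFS_INODES_PER_CHUNK;
		     chunkidx += nicluster, agbno += nbcluster) {
			/* start readahead only for clusters with used inodes */
			if (XFS_INOBT_MASKN(chunkidx, nicluster) & ~gfree)
				xfs_btree_reada_bufs(mp, agno, agbno, nbcluster);
		}
		/* stash the chunk in CPU-native (incore) form, no byte swap */
		irbp->ir_startino = gino;
		irbp->ir_freecount = gcnt;
		irbp->ir_free = gfree;
		irbp++;
		icount += XFS_INODES_PER_CHUNK - gcnt;
	}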
diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c
index 305a9d0436f4..e6dbe6ba6fbd 100644
--- a/fs/xfs/xfs_itable.c
+++ b/fs/xfs/xfs_itable.c
@@ -325,9 +325,9 @@ xfs_bulkstat(
 	int			i;	/* loop index */
 	int			icount;	/* count of inodes good in irbuf */
 	xfs_ino_t		ino;	/* inode number (filesystem) */
-	xfs_inobt_rec_t		*irbp;	/* current irec buffer pointer */
-	xfs_inobt_rec_t		*irbuf;	/* start of irec buffer */
-	xfs_inobt_rec_t		*irbufend; /* end of good irec buffer entries */
+	xfs_inobt_rec_incore_t	*irbp;	/* current irec buffer pointer */
+	xfs_inobt_rec_incore_t	*irbuf;	/* start of irec buffer */
+	xfs_inobt_rec_incore_t	*irbufend; /* end of good irec buffer entries */
 	xfs_ino_t		lastino=0; /* last inode number returned */
 	int			nbcluster; /* # of blocks in a cluster */
 	int			nicluster; /* # of inodes in a cluster */
@@ -398,7 +398,7 @@ xfs_bulkstat(
 		 * Allocate and initialize a btree cursor for ialloc btree.
 		 */
 		cur = xfs_btree_init_cursor(mp, NULL, agbp, agno, XFS_BTNUM_INO,
-			(xfs_inode_t *)0, 0);
+				(xfs_inode_t *)0, 0);
 		irbp = irbuf;
 		irbufend = irbuf + nirbuf;
 		end_of_ag = 0;
@@ -435,9 +435,9 @@ xfs_bulkstat(
 						gcnt++;
 				}
 				gfree |= XFS_INOBT_MASKN(0, chunkidx);
-				irbp->ir_startino = cpu_to_be32(gino);
-				irbp->ir_freecount = cpu_to_be32(gcnt);
-				irbp->ir_free = cpu_to_be64(gfree);
+				irbp->ir_startino = gino;
+				irbp->ir_freecount = gcnt;
+				irbp->ir_free = gfree;
 				irbp++;
 				agino = gino + XFS_INODES_PER_CHUNK;
 				icount = XFS_INODES_PER_CHUNK - gcnt;
@@ -491,11 +491,27 @@ xfs_bulkstat(
 			}
 			/*
 			 * If this chunk has any allocated inodes, save it.
+			 * Also start read-ahead now for this chunk.
 			 */
 			if (gcnt < XFS_INODES_PER_CHUNK) {
-				irbp->ir_startino = cpu_to_be32(gino);
-				irbp->ir_freecount = cpu_to_be32(gcnt);
-				irbp->ir_free = cpu_to_be64(gfree);
+				/*
+				 * Loop over all clusters in the next chunk.
+				 * Do a readahead if there are any allocated
+				 * inodes in that cluster.
+				 */
+				for (agbno = XFS_AGINO_TO_AGBNO(mp, gino),
+				     chunkidx = 0;
+				     chunkidx < XFS_INODES_PER_CHUNK;
+				     chunkidx += nicluster,
+				     agbno += nbcluster) {
+					if (XFS_INOBT_MASKN(chunkidx,
+							    nicluster) & ~gfree)
+						xfs_btree_reada_bufs(mp, agno,
+							agbno, nbcluster);
+				}
+				irbp->ir_startino = gino;
+				irbp->ir_freecount = gcnt;
+				irbp->ir_free = gfree;
 				irbp++;
 				icount += XFS_INODES_PER_CHUNK - gcnt;
 			}
@@ -519,33 +535,11 @@ xfs_bulkstat(
 		for (irbp = irbuf;
 		     irbp < irbufend && ubleft >= statstruct_size; irbp++) {
 			/*
-			 * Read-ahead the next chunk's worth of inodes.
-			 */
-			if (&irbp[1] < irbufend) {
-				/*
-				 * Loop over all clusters in the next chunk.
-				 * Do a readahead if there are any allocated
-				 * inodes in that cluster.
-				 */
-				for (agbno = XFS_AGINO_TO_AGBNO(mp,
-					be32_to_cpu(irbp[1].ir_startino)),
-				     chunkidx = 0;
-				     chunkidx < XFS_INODES_PER_CHUNK;
-				     chunkidx += nicluster,
-				     agbno += nbcluster) {
-					if (XFS_INOBT_MASKN(chunkidx,
-						nicluster) &
-					    ~(be64_to_cpu(irbp[1].ir_free)))
-						xfs_btree_reada_bufs(mp, agno,
-							agbno, nbcluster);
-				}
-			}
-			/*
 			 * Now process this chunk of inodes.
 			 */
-			for (agino = be32_to_cpu(irbp->ir_startino), chunkidx = 0, clustidx = 0;
+			for (agino = irbp->ir_startino, chunkidx = clustidx = 0;
 			     ubleft > 0 &&
-			     be32_to_cpu(irbp->ir_freecount) < XFS_INODES_PER_CHUNK;
+			     irbp->ir_freecount < XFS_INODES_PER_CHUNK;
 			     chunkidx++, clustidx++, agino++) {
 				ASSERT(chunkidx < XFS_INODES_PER_CHUNK);
 				/*
@@ -565,7 +559,7 @@ xfs_bulkstat(
 				 */
 				if ((chunkidx & (nicluster - 1)) == 0) {
 					agbno = XFS_AGINO_TO_AGBNO(mp,
-							be32_to_cpu(irbp->ir_startino)) +
+							irbp->ir_startino) +
 							((chunkidx & nimask) >>
 							 mp->m_sb.sb_inopblog);
 
@@ -605,13 +599,13 @@ xfs_bulkstat(
 				/*
 				 * Skip if this inode is free.
 				 */
-				if (XFS_INOBT_MASK(chunkidx) & be64_to_cpu(irbp->ir_free))
+				if (XFS_INOBT_MASK(chunkidx) & irbp->ir_free)
 					continue;
 				/*
 				 * Count used inodes as free so we can tell
 				 * when the chunk is used up.
 				 */
-				be32_add(&irbp->ir_freecount, 1);
+				irbp->ir_freecount++;
 				ino = XFS_AGINO_TO_INO(mp, agno, agino);
 				bno = XFS_AGB_TO_DADDR(mp, agno, agbno);
 				if (!xfs_bulkstat_use_dinode(mp, flags, bp,