aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorNathan Scott <nathans@sgi.com>2006-09-27 21:02:09 -0400
committerTim Shimmin <tes@sgi.com>2006-09-27 21:02:09 -0400
commitbb3c7d2936b6db6f5ded9abf4d215abe97af8372 (patch)
tree0958e0102e703cceb42c4d4947227ac29642456e
parent2627509330323efc88b5818065cba737e000de5c (diff)
[XFS] Increase the size of the buffer holding the local inode cluster
list, to increase our potential readahead window and in turn improve
bulkstat performance.

SGI-PV: 944409
SGI-Modid: xfs-linux-melb:xfs-kern:26607a

Signed-off-by: Nathan Scott <nathans@sgi.com>
Signed-off-by: Tim Shimmin <tes@sgi.com>
-rw-r--r--fs/xfs/xfs_itable.c22
1 files changed, 16 insertions, 6 deletions
diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c
index e6dbe6ba6fbd..315c9bcd3be3 100644
--- a/fs/xfs/xfs_itable.c
+++ b/fs/xfs/xfs_itable.c
@@ -324,6 +324,8 @@ xfs_bulkstat(
 	xfs_agino_t		gino;	/* current btree rec's start inode */
 	int			i;	/* loop index */
 	int			icount;	/* count of inodes good in irbuf */
+	int			irbsize; /* size of irec buffer in bytes */
+	unsigned int		kmflags; /* flags for allocating irec buffer */
 	xfs_ino_t		ino;	/* inode number (filesystem) */
 	xfs_inobt_rec_incore_t	*irbp;	/* current irec buffer pointer */
 	xfs_inobt_rec_incore_t	*irbuf;	/* start of irec buffer */
@@ -369,12 +371,20 @@ xfs_bulkstat(
 	nimask = ~(nicluster - 1);
 	nbcluster = nicluster >> mp->m_sb.sb_inopblog;
 	/*
-	 * Allocate a page-sized buffer for inode btree records.
-	 * We could try allocating something smaller, but for normal
-	 * calls we'll always (potentially) need the whole page.
+	 * Allocate a local buffer for inode cluster btree records.
+	 * This caps our maximum readahead window (so don't be stingy)
+	 * but we must handle the case where we can't get a contiguous
+	 * multi-page buffer, so we drop back toward pagesize; the end
+	 * case we ensure succeeds, via appropriate allocation flags.
 	 */
-	irbuf = kmem_alloc(NBPC, KM_SLEEP);
-	nirbuf = NBPC / sizeof(*irbuf);
+	irbsize = NBPP * 4;
+	kmflags = KM_SLEEP | KM_MAYFAIL;
+	while (!(irbuf = kmem_alloc(irbsize, kmflags))) {
+		if ((irbsize >>= 1) <= NBPP)
+			kmflags = KM_SLEEP;
+	}
+	nirbuf = irbsize / sizeof(*irbuf);
+
 	/*
 	 * Loop over the allocation groups, starting from the last
 	 * inode returned; 0 means start of the allocation group.
@@ -672,7 +682,7 @@ xfs_bulkstat(
 	/*
 	 * Done, we're either out of filesystem or space to put the data.
 	 */
-	kmem_free(irbuf, NBPC);
+	kmem_free(irbuf, irbsize);
 	*ubcountp = ubelem;
 	if (agno >= mp->m_sb.sb_agcount) {
 		/*