author		Jie Liu <jeff.liu@oracle.com>	2014-07-24 04:41:18 -0400
committer	Dave Chinner <david@fromorbit.com>	2014-07-24 04:41:18 -0400
commit		4b8fdfecd84528e044a6ca32242de641b203995b (patch)
tree		d2549097d32b5d297218891d1f55910633a398e7 /fs/xfs
parent		d4c27348751bffeb57d87fc631f76716e3036c6d (diff)
xfs: introduce xfs_bulkstat_ichunk_ra
From: Jie Liu <jeff.liu@oracle.com>

Introduce xfs_bulkstat_ichunk_ra() to loop over all clusters in the next
inode chunk, then perform readahead if there are any allocated inodes in
that cluster. Refactor xfs_bulkstat() to use it.

Signed-off-by: Jie Liu <jeff.liu@oracle.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Dave Chinner <david@fromorbit.com>
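For readers less familiar with the inobt record layout, the readahead decision
in the new helper reduces to a bitmask test: each 64-inode chunk carries a
free-inode bitmap (ir_free), and a cluster is worth reading ahead only if at
least one of its inodes is allocated. Below is a minimal userspace sketch of
that test; the maskn() helper, the example bitmap, and the constants are
simplified stand-ins for the kernel's xfs_inobt_maskn() and
struct xfs_inobt_rec_incore, not the actual XFS definitions.

/* Sketch: decide which clusters of a 64-inode chunk deserve readahead.
 * Simplified stand-in for the mask logic in xfs_bulkstat_ichunk_ra(). */
#include <stdint.h>
#include <stdio.h>

#define INODES_PER_CHUNK	64	/* stand-in for XFS_INODES_PER_CHUNK */

/* Build a mask of n bits starting at bit i, like xfs_inobt_maskn(). */
static uint64_t maskn(int i, int n)
{
	uint64_t mask = (n >= 64) ? ~0ULL : ((1ULL << n) - 1);
	return mask << i;
}

int main(void)
{
	/* ir_free: a set bit means the inode is free.  Made-up example:
	 * inodes 0-31 are free, 32-63 are allocated. */
	uint64_t ir_free = 0x00000000ffffffffULL;
	int inodes_per_cluster = 16;	/* e.g. 4 blocks * 4 inodes/block */

	for (int i = 0; i < INODES_PER_CHUNK; i += inodes_per_cluster) {
		/* Any allocated (i.e. not-free) inode in this cluster? */
		if (maskn(i, inodes_per_cluster) & ~ir_free)
			printf("cluster at inode %2d: readahead\n", i);
		else
			printf("cluster at inode %2d: skip, all free\n", i);
	}
	return 0;
}

In the patch itself this loop runs between blk_start_plug() and
blk_finish_plug(), so the individual readahead submissions are batched and the
block layer can merge adjacent requests before dispatch.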
Diffstat (limited to 'fs/xfs')
-rw-r--r--	fs/xfs/xfs_itable.c	56
1 file changed, 32 insertions(+), 24 deletions(-)
diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c
index dfc4aa52ff83..b50816789f2b 100644
--- a/fs/xfs/xfs_itable.c
+++ b/fs/xfs/xfs_itable.c
@@ -172,6 +172,37 @@ xfs_bulkstat_one(
 		xfs_bulkstat_one_fmt, ubused, stat);
 }
 
+/*
+ * Loop over all clusters in a chunk for a given incore inode allocation btree
+ * record.  Do a readahead if there are any allocated inodes in that cluster.
+ */
+STATIC void
+xfs_bulkstat_ichunk_ra(
+	struct xfs_mount		*mp,
+	xfs_agnumber_t			agno,
+	struct xfs_inobt_rec_incore	*irec)
+{
+	xfs_agblock_t			agbno;
+	struct blk_plug			plug;
+	int				blks_per_cluster;
+	int				inodes_per_cluster;
+	int				i;	/* inode chunk index */
+
+	agbno = XFS_AGINO_TO_AGBNO(mp, irec->ir_startino);
+	blks_per_cluster = xfs_icluster_size_fsb(mp);
+	inodes_per_cluster = blks_per_cluster << mp->m_sb.sb_inopblog;
+
+	blk_start_plug(&plug);
+	for (i = 0; i < XFS_INODES_PER_CHUNK;
+	     i += inodes_per_cluster, agbno += blks_per_cluster) {
+		if (xfs_inobt_maskn(i, inodes_per_cluster) & ~irec->ir_free) {
+			xfs_btree_reada_bufs(mp, agno, agbno, blks_per_cluster,
+					     &xfs_inode_buf_ops);
+		}
+	}
+	blk_finish_plug(&plug);
+}
+
 #define XFS_BULKSTAT_UBLEFT(ubleft)	((ubleft) >= statstruct_size)
 
 /*
@@ -187,7 +218,6 @@ xfs_bulkstat(
 	char			__user *ubuffer, /* buffer with inode stats */
 	int			*done)	/* 1 if there are more stats to get */
 {
-	xfs_agblock_t		agbno=0;/* allocation group block number */
 	xfs_buf_t		*agbp;	/* agi header buffer */
 	xfs_agi_t		*agi;	/* agi header data */
 	xfs_agino_t		agino;	/* inode # in allocation group */
@@ -206,8 +236,6 @@ xfs_bulkstat(
 	xfs_inobt_rec_incore_t	*irbuf;	/* start of irec buffer */
 	xfs_inobt_rec_incore_t	*irbufend; /* end of good irec buffer entries */
 	xfs_ino_t		lastino; /* last inode number returned */
-	int			blks_per_cluster; /* # of blocks per cluster */
-	int			inodes_per_cluster;/* # of inodes per cluster */
 	int			nirbuf;	/* size of irbuf */
 	int			rval;	/* return value error code */
 	int			tmp;	/* result value from btree calls */
@@ -237,8 +265,6 @@ xfs_bulkstat(
 	*done = 0;
 	fmterror = 0;
 	ubufp = ubuffer;
-	blks_per_cluster = xfs_icluster_size_fsb(mp);
-	inodes_per_cluster = blks_per_cluster << mp->m_sb.sb_inopblog;
 	irbuf = kmem_zalloc_greedy(&irbsize, PAGE_SIZE, PAGE_SIZE * 4);
 	if (!irbuf)
 		return -ENOMEM;
@@ -347,25 +373,7 @@ xfs_bulkstat(
 		 * Also start read-ahead now for this chunk.
 		 */
 		if (r.ir_freecount < XFS_INODES_PER_CHUNK) {
-			struct blk_plug	plug;
-			/*
-			 * Loop over all clusters in the next chunk.
-			 * Do a readahead if there are any allocated
-			 * inodes in that cluster.
-			 */
-			blk_start_plug(&plug);
-			agbno = XFS_AGINO_TO_AGBNO(mp, r.ir_startino);
-			for (chunkidx = 0;
-			     chunkidx < XFS_INODES_PER_CHUNK;
-			     chunkidx += inodes_per_cluster,
-			     agbno += blks_per_cluster) {
-				if (xfs_inobt_maskn(chunkidx,
-				    inodes_per_cluster) & ~r.ir_free)
-					xfs_btree_reada_bufs(mp, agno,
-					    agbno, blks_per_cluster,
-					    &xfs_inode_buf_ops);
-			}
-			blk_finish_plug(&plug);
+			xfs_bulkstat_ichunk_ra(mp, agno, &r);
 			irbp->ir_startino = r.ir_startino;
 			irbp->ir_freecount = r.ir_freecount;
 			irbp->ir_free = r.ir_free;