author		Darrick J. Wong <darrick.wong@oracle.com>	2019-07-02 12:39:41 -0400
committer	Darrick J. Wong <darrick.wong@oracle.com>	2019-07-02 12:40:05 -0400
commit		da1d9e5912477c2f090202052ddd2a77cea6669c
tree		081b12962cb8fd9e64a054e211da41502e28df89
parent		938c710d99a62eed93b6a2770f92f943762beca0
xfs: move bulkstat ichunk helpers to iwalk code
Now that we've reworked the bulkstat code to use iwalk, we can move the
old bulkstat ichunk helpers to xfs_iwalk.c.  No functional changes here.

Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
Reviewed-by: Brian Foster <bfoster@redhat.com>
-rw-r--r--	fs/xfs/xfs_itable.c	93
-rw-r--r--	fs/xfs/xfs_itable.h	 8
-rw-r--r--	fs/xfs/xfs_iwalk.c	96
3 files changed, 93 insertions(+), 104 deletions(-)
diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c
index 118ff1b686c1..8da5e978119d 100644
--- a/fs/xfs/xfs_itable.c
+++ b/fs/xfs/xfs_itable.c
@@ -188,99 +188,6 @@ xfs_bulkstat_one(
 	return error;
 }
 
-/*
- * Loop over all clusters in a chunk for a given incore inode allocation btree
- * record. Do a readahead if there are any allocated inodes in that cluster.
- */
-void
-xfs_bulkstat_ichunk_ra(
-	struct xfs_mount		*mp,
-	xfs_agnumber_t			agno,
-	struct xfs_inobt_rec_incore	*irec)
-{
-	struct xfs_ino_geometry		*igeo = M_IGEO(mp);
-	xfs_agblock_t			agbno;
-	struct blk_plug			plug;
-	int				i;	/* inode chunk index */
-
-	agbno = XFS_AGINO_TO_AGBNO(mp, irec->ir_startino);
-
-	blk_start_plug(&plug);
-	for (i = 0;
-	     i < XFS_INODES_PER_CHUNK;
-	     i += igeo->inodes_per_cluster,
-	     agbno += igeo->blocks_per_cluster) {
-		if (xfs_inobt_maskn(i, igeo->inodes_per_cluster) &
-		    ~irec->ir_free) {
-			xfs_btree_reada_bufs(mp, agno, agbno,
-					igeo->blocks_per_cluster,
-					&xfs_inode_buf_ops);
-		}
-	}
-	blk_finish_plug(&plug);
-}
-
-/*
- * Lookup the inode chunk that the given inode lives in and then get the record
- * if we found the chunk. If the inode was not the last in the chunk and there
- * are some left allocated, update the data for the pointed-to record as well as
- * return the count of grabbed inodes.
- */
-int
-xfs_bulkstat_grab_ichunk(
-	struct xfs_btree_cur		*cur,	/* btree cursor */
-	xfs_agino_t			agino,	/* starting inode of chunk */
-	int				*icount,/* return # of inodes grabbed */
-	struct xfs_inobt_rec_incore	*irec)	/* btree record */
-{
-	int				idx;	/* index into inode chunk */
-	int				stat;
-	int				error = 0;
-
-	/* Lookup the inode chunk that this inode lives in */
-	error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &stat);
-	if (error)
-		return error;
-	if (!stat) {
-		*icount = 0;
-		return error;
-	}
-
-	/* Get the record, should always work */
-	error = xfs_inobt_get_rec(cur, irec, &stat);
-	if (error)
-		return error;
-	XFS_WANT_CORRUPTED_RETURN(cur->bc_mp, stat == 1);
-
-	/* Check if the record contains the inode in request */
-	if (irec->ir_startino + XFS_INODES_PER_CHUNK <= agino) {
-		*icount = 0;
-		return 0;
-	}
-
-	idx = agino - irec->ir_startino + 1;
-	if (idx < XFS_INODES_PER_CHUNK &&
-	    (xfs_inobt_maskn(idx, XFS_INODES_PER_CHUNK - idx) & ~irec->ir_free)) {
-		int	i;
-
-		/* We got a right chunk with some left inodes allocated at it.
-		 * Grab the chunk record.  Mark all the uninteresting inodes
-		 * free -- because they're before our start point.
-		 */
-		for (i = 0; i < idx; i++) {
-			if (XFS_INOBT_MASK(i) & ~irec->ir_free)
-				irec->ir_freecount++;
-		}
-
-		irec->ir_free |= xfs_inobt_maskn(0, idx);
-		*icount = irec->ir_count - irec->ir_freecount;
-	}
-
-	return 0;
-}
-
-#define XFS_BULKSTAT_UBLEFT(ubleft)	((ubleft) >= statstruct_size)
-
 static int
 xfs_bulkstat_iwalk(
 	struct xfs_mount	*mp,
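
For reference, the readahead decision in the helper being moved is a pure
bitmask test: xfs_inobt_maskn(i, n) builds a mask of n bits starting at bit i
of the chunk's free-inode bitmap, and a nonzero intersection with ~ir_free
means that cluster holds at least one allocated inode worth reading ahead.
Here is a minimal userspace sketch of just that test; the 64-inode chunk and
8-inode cluster geometry are illustrative assumptions (the kernel gets them
from M_IGEO(mp)), and maskn() mirrors the xfs_inobt_maskn() definition.

	#include <stdint.h>
	#include <stdio.h>

	#define INODES_PER_CHUNK	64	/* XFS_INODES_PER_CHUNK */
	#define INODES_PER_CLUSTER	8	/* assumed inodes_per_cluster */

	/* Mask of n bits starting at bit i, as xfs_inobt_maskn() does. */
	static uint64_t maskn(int i, int n)
	{
		return ((n >= 64 ? 0 : (uint64_t)1 << n) - 1) << i;
	}

	int main(void)
	{
		/* One bit per inode in the chunk; set bits are free. */
		uint64_t ir_free = ~0ULL << 8;	/* only inodes 0-7 allocated */
		int i;

		for (i = 0; i < INODES_PER_CHUNK; i += INODES_PER_CLUSTER) {
			if (maskn(i, INODES_PER_CLUSTER) & ~ir_free)
				printf("cluster at index %2d: read ahead\n", i);
			else
				printf("cluster at index %2d: all free, skip\n", i);
		}
		return 0;
	}

Only the first cluster triggers readahead here; in the real function the
blk_start_plug()/blk_finish_plug() pair around the loop batches those reads
so the block layer can merge adjacent cluster buffers into fewer I/Os.
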
diff --git a/fs/xfs/xfs_itable.h b/fs/xfs/xfs_itable.h
index 624ffbf8cd85..1db1cd30aa29 100644
--- a/fs/xfs/xfs_itable.h
+++ b/fs/xfs/xfs_itable.h
@@ -64,12 +64,4 @@ xfs_inumbers(
 	void __user		*buffer,	/* buffer with inode info */
 	inumbers_fmt_pf		formatter);
 
-/* Temporarily needed while we refactor functions. */
-struct xfs_btree_cur;
-struct xfs_inobt_rec_incore;
-void xfs_bulkstat_ichunk_ra(struct xfs_mount *mp, xfs_agnumber_t agno,
-		struct xfs_inobt_rec_incore *irec);
-int xfs_bulkstat_grab_ichunk(struct xfs_btree_cur *cur, xfs_agino_t agino,
-		int *icount, struct xfs_inobt_rec_incore *irec);
-
 #endif	/* __XFS_ITABLE_H__ */
diff --git a/fs/xfs/xfs_iwalk.c b/fs/xfs/xfs_iwalk.c
index 4aa22f02b9ec..0098d6653daf 100644
--- a/fs/xfs/xfs_iwalk.c
+++ b/fs/xfs/xfs_iwalk.c
@@ -15,7 +15,6 @@
 #include "xfs_ialloc.h"
 #include "xfs_ialloc_btree.h"
 #include "xfs_iwalk.h"
-#include "xfs_itable.h"
 #include "xfs_error.h"
 #include "xfs_trace.h"
 #include "xfs_icache.h"
@@ -66,6 +65,97 @@ struct xfs_iwalk_ag {
 	void			*data;
 };
 
+/*
+ * Loop over all clusters in a chunk for a given incore inode allocation btree
+ * record. Do a readahead if there are any allocated inodes in that cluster.
+ */
+STATIC void
+xfs_iwalk_ichunk_ra(
+	struct xfs_mount		*mp,
+	xfs_agnumber_t			agno,
+	struct xfs_inobt_rec_incore	*irec)
+{
+	struct xfs_ino_geometry		*igeo = M_IGEO(mp);
+	xfs_agblock_t			agbno;
+	struct blk_plug			plug;
+	int				i;	/* inode chunk index */
+
+	agbno = XFS_AGINO_TO_AGBNO(mp, irec->ir_startino);
+
+	blk_start_plug(&plug);
+	for (i = 0;
+	     i < XFS_INODES_PER_CHUNK;
+	     i += igeo->inodes_per_cluster,
+	     agbno += igeo->blocks_per_cluster) {
+		if (xfs_inobt_maskn(i, igeo->inodes_per_cluster) &
+		    ~irec->ir_free) {
+			xfs_btree_reada_bufs(mp, agno, agbno,
+					igeo->blocks_per_cluster,
+					&xfs_inode_buf_ops);
+		}
+	}
+	blk_finish_plug(&plug);
+}
+
+/*
+ * Lookup the inode chunk that the given inode lives in and then get the record
+ * if we found the chunk. If the inode was not the last in the chunk and there
+ * are some left allocated, update the data for the pointed-to record as well as
+ * return the count of grabbed inodes.
+ */
+STATIC int
+xfs_iwalk_grab_ichunk(
+	struct xfs_btree_cur		*cur,	/* btree cursor */
+	xfs_agino_t			agino,	/* starting inode of chunk */
+	int				*icount,/* return # of inodes grabbed */
+	struct xfs_inobt_rec_incore	*irec)	/* btree record */
+{
+	int				idx;	/* index into inode chunk */
+	int				stat;
+	int				error = 0;
+
+	/* Lookup the inode chunk that this inode lives in */
+	error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &stat);
+	if (error)
+		return error;
+	if (!stat) {
+		*icount = 0;
+		return error;
+	}
+
+	/* Get the record, should always work */
+	error = xfs_inobt_get_rec(cur, irec, &stat);
+	if (error)
+		return error;
+	XFS_WANT_CORRUPTED_RETURN(cur->bc_mp, stat == 1);
+
+	/* Check if the record contains the inode in request */
+	if (irec->ir_startino + XFS_INODES_PER_CHUNK <= agino) {
+		*icount = 0;
+		return 0;
+	}
+
+	idx = agino - irec->ir_startino + 1;
+	if (idx < XFS_INODES_PER_CHUNK &&
+	    (xfs_inobt_maskn(idx, XFS_INODES_PER_CHUNK - idx) & ~irec->ir_free)) {
+		int	i;
+
+		/* We got a right chunk with some left inodes allocated at it.
+		 * Grab the chunk record.  Mark all the uninteresting inodes
+		 * free -- because they're before our start point.
+		 */
+		for (i = 0; i < idx; i++) {
+			if (XFS_INOBT_MASK(i) & ~irec->ir_free)
+				irec->ir_freecount++;
+		}
+
+		irec->ir_free |= xfs_inobt_maskn(0, idx);
+		*icount = irec->ir_count - irec->ir_freecount;
+	}
+
+	return 0;
+}
+
 /* Allocate memory for a walk. */
 STATIC int
 xfs_iwalk_alloc(
@@ -191,7 +281,7 @@ xfs_iwalk_ag_start(
 	 * We require a lookup cache of at least two elements so that we don't
 	 * have to deal with tearing down the cursor to walk the records.
 	 */
-	error = xfs_bulkstat_grab_ichunk(*curpp, agino - 1, &icount,
+	error = xfs_iwalk_grab_ichunk(*curpp, agino - 1, &icount,
 			&iwag->recs[iwag->nr_recs]);
 	if (error)
 		return error;
@@ -298,7 +388,7 @@ xfs_iwalk_ag(
 	 * Start readahead for this inode chunk in anticipation of
 	 * walking the inodes.
 	 */
-	xfs_bulkstat_ichunk_ra(mp, agno, irec);
+	xfs_iwalk_ichunk_ra(mp, agno, irec);
 
 	/*
 	 * If there's space in the buffer for more records, increment
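
As for the grab helper: it lets a walk restart mid-chunk by trimming the
record so that everything at or before the resume point looks free, leaving
*icount as the number of inodes still to visit. Below is a minimal userspace
sketch of just that trimming arithmetic, with made-up values; the kernel
version additionally performs the inobt lookup, the XFS_WANT_CORRUPTED_RETURN
check, and the containment test shown in the patch.

	#include <stdint.h>
	#include <stdio.h>

	#define INODES_PER_CHUNK	64

	/* Pared-down stand-in for struct xfs_inobt_rec_incore. */
	struct irec {
		uint32_t	ir_startino;	/* first inode number in chunk */
		uint32_t	ir_count;	/* total inodes in chunk */
		uint32_t	ir_freecount;	/* free inodes in chunk */
		uint64_t	ir_free;	/* free-inode bitmap */
	};

	/* Mask of n bits starting at bit i, as xfs_inobt_maskn() does. */
	static uint64_t maskn(int i, int n)
	{
		return ((n >= 64 ? 0 : (uint64_t)1 << n) - 1) << i;
	}

	/* Trim the record so inodes 0..idx-1 look free; count what's left. */
	static void grab_ichunk(struct irec *irec, uint32_t agino, int *icount)
	{
		int idx = agino - irec->ir_startino + 1;
		int i;

		*icount = 0;
		if (idx >= INODES_PER_CHUNK)	/* whole chunk already walked */
			return;

		/* Allocated inodes before the resume point count as free... */
		for (i = 0; i < idx; i++)
			if (((uint64_t)1 << i) & ~irec->ir_free)
				irec->ir_freecount++;

		/* ...and get marked free, so the caller skips them. */
		irec->ir_free |= maskn(0, idx);
		*icount = irec->ir_count - irec->ir_freecount;
	}

	int main(void)
	{
		/* Fully allocated chunk starting at inode 128. */
		struct irec irec = { 128, 64, 0, 0 };
		int icount;

		/* Resume a walk whose last visited inode was 131. */
		grab_ichunk(&irec, 131, &icount);
		printf("icount = %d\n", icount);	/* 60 inodes left */
		return 0;
	}

This matches how xfs_iwalk_ag_start() calls the helper with agino - 1: inodes
strictly before the requested starting inode are masked off, so the walk
resumes exactly at agino.
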