about summary refs log tree commit diff stats
path: root/fs/xfs
diff options
context:
space:
mode:
author: Darrick J. Wong <darrick.wong@oracle.com> 2019-04-25 21:26:24 -0400
committer: Darrick J. Wong <darrick.wong@oracle.com> 2019-04-30 11:19:13 -0400
commit75efa57d0bf5fcf650a183f0ce0dc011ba8c4bc8 (patch)
treed6dfd62d2680be7cdb73e61e857e833f12ae0a78 /fs/xfs
parent94079285756d66b1aded4e36d35452cb1f9c953a (diff)
xfs: add online scrub for superblock counters
Teach online scrub how to check the filesystem summary counters. We use the incore delalloc block counter along with the incore AG headers to compute expected values for fdblocks, icount, and ifree, and then check that the percpu counter is within a certain threshold of the expected value. This is done to avoid having to freeze or otherwise lock the filesystem, which means that we're only checking that the counters are fairly close, not that they're exactly correct.

Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
Reviewed-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Brian Foster <bfoster@redhat.com>
Diffstat (limited to 'fs/xfs')
-rw-r--r--fs/xfs/Makefile1
-rw-r--r--fs/xfs/libxfs/xfs_fs.h3
-rw-r--r--fs/xfs/libxfs/xfs_types.c2
-rw-r--r--fs/xfs/libxfs/xfs_types.h2
-rw-r--r--fs/xfs/scrub/common.c9
-rw-r--r--fs/xfs/scrub/common.h2
-rw-r--r--fs/xfs/scrub/fscounters.c366
-rw-r--r--fs/xfs/scrub/health.c1
-rw-r--r--fs/xfs/scrub/scrub.c6
-rw-r--r--fs/xfs/scrub/scrub.h9
-rw-r--r--fs/xfs/scrub/trace.h63
11 files changed, 461 insertions, 3 deletions
diff --git a/fs/xfs/Makefile b/fs/xfs/Makefile
index b20964e26a22..1dfc6df2e2bd 100644
--- a/fs/xfs/Makefile
+++ b/fs/xfs/Makefile
@@ -143,6 +143,7 @@ xfs-y += $(addprefix scrub/, \
143 common.o \ 143 common.o \
144 dabtree.o \ 144 dabtree.o \
145 dir.o \ 145 dir.o \
146 fscounters.o \
146 health.o \ 147 health.o \
147 ialloc.o \ 148 ialloc.o \
148 inode.o \ 149 inode.o \
diff --git a/fs/xfs/libxfs/xfs_fs.h b/fs/xfs/libxfs/xfs_fs.h
index 43a53b03247b..e7382c780ed7 100644
--- a/fs/xfs/libxfs/xfs_fs.h
+++ b/fs/xfs/libxfs/xfs_fs.h
@@ -578,9 +578,10 @@ struct xfs_scrub_metadata {
578#define XFS_SCRUB_TYPE_UQUOTA 21 /* user quotas */ 578#define XFS_SCRUB_TYPE_UQUOTA 21 /* user quotas */
579#define XFS_SCRUB_TYPE_GQUOTA 22 /* group quotas */ 579#define XFS_SCRUB_TYPE_GQUOTA 22 /* group quotas */
580#define XFS_SCRUB_TYPE_PQUOTA 23 /* project quotas */ 580#define XFS_SCRUB_TYPE_PQUOTA 23 /* project quotas */
581#define XFS_SCRUB_TYPE_FSCOUNTERS 24 /* fs summary counters */
581 582
582/* Number of scrub subcommands. */ 583/* Number of scrub subcommands. */
583#define XFS_SCRUB_TYPE_NR 24 584#define XFS_SCRUB_TYPE_NR 25
584 585
585/* i: Repair this metadata. */ 586/* i: Repair this metadata. */
586#define XFS_SCRUB_IFLAG_REPAIR (1 << 0) 587#define XFS_SCRUB_IFLAG_REPAIR (1 << 0)
diff --git a/fs/xfs/libxfs/xfs_types.c b/fs/xfs/libxfs/xfs_types.c
index de310712dd6d..d51acc95bc00 100644
--- a/fs/xfs/libxfs/xfs_types.c
+++ b/fs/xfs/libxfs/xfs_types.c
@@ -185,7 +185,7 @@ xfs_verify_rtbno(
185} 185}
186 186
187/* Calculate the range of valid icount values. */ 187/* Calculate the range of valid icount values. */
188static void 188void
189xfs_icount_range( 189xfs_icount_range(
190 struct xfs_mount *mp, 190 struct xfs_mount *mp,
191 unsigned long long *min, 191 unsigned long long *min,
diff --git a/fs/xfs/libxfs/xfs_types.h b/fs/xfs/libxfs/xfs_types.h
index c5a25403b4db..802b34cd10fe 100644
--- a/fs/xfs/libxfs/xfs_types.h
+++ b/fs/xfs/libxfs/xfs_types.h
@@ -191,5 +191,7 @@ bool xfs_verify_dir_ino(struct xfs_mount *mp, xfs_ino_t ino);
191bool xfs_verify_rtbno(struct xfs_mount *mp, xfs_rtblock_t rtbno); 191bool xfs_verify_rtbno(struct xfs_mount *mp, xfs_rtblock_t rtbno);
192bool xfs_verify_icount(struct xfs_mount *mp, unsigned long long icount); 192bool xfs_verify_icount(struct xfs_mount *mp, unsigned long long icount);
193bool xfs_verify_dablk(struct xfs_mount *mp, xfs_fileoff_t off); 193bool xfs_verify_dablk(struct xfs_mount *mp, xfs_fileoff_t off);
194void xfs_icount_range(struct xfs_mount *mp, unsigned long long *min,
195 unsigned long long *max);
194 196
195#endif /* __XFS_TYPES_H__ */ 197#endif /* __XFS_TYPES_H__ */
diff --git a/fs/xfs/scrub/common.c b/fs/xfs/scrub/common.c
index 7d7e91a7bb86..973aa59975e3 100644
--- a/fs/xfs/scrub/common.c
+++ b/fs/xfs/scrub/common.c
@@ -209,6 +209,15 @@ xchk_ino_set_preen(
209 trace_xchk_ino_preen(sc, ino, __return_address); 209 trace_xchk_ino_preen(sc, ino, __return_address);
210} 210}
211 211
/*
 * Record something being wrong with the filesystem primary superblock.
 * Unlike xchk_block_set_corrupt(), no specific buffer is implicated here;
 * the trace event reports a zero disk address.
 */
void
xchk_set_corrupt(
	struct xfs_scrub	*sc)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
	trace_xchk_fs_error(sc, 0, __return_address);
}
220
212/* Record a corrupt block. */ 221/* Record a corrupt block. */
213void 222void
214xchk_block_set_corrupt( 223xchk_block_set_corrupt(
diff --git a/fs/xfs/scrub/common.h b/fs/xfs/scrub/common.h
index 84900bfad852..003a772cd26c 100644
--- a/fs/xfs/scrub/common.h
+++ b/fs/xfs/scrub/common.h
@@ -39,6 +39,7 @@ void xchk_block_set_preen(struct xfs_scrub *sc,
39 struct xfs_buf *bp); 39 struct xfs_buf *bp);
40void xchk_ino_set_preen(struct xfs_scrub *sc, xfs_ino_t ino); 40void xchk_ino_set_preen(struct xfs_scrub *sc, xfs_ino_t ino);
41 41
42void xchk_set_corrupt(struct xfs_scrub *sc);
42void xchk_block_set_corrupt(struct xfs_scrub *sc, 43void xchk_block_set_corrupt(struct xfs_scrub *sc,
43 struct xfs_buf *bp); 44 struct xfs_buf *bp);
44void xchk_ino_set_corrupt(struct xfs_scrub *sc, xfs_ino_t ino); 45void xchk_ino_set_corrupt(struct xfs_scrub *sc, xfs_ino_t ino);
@@ -105,6 +106,7 @@ xchk_setup_quota(struct xfs_scrub *sc, struct xfs_inode *ip)
105 return -ENOENT; 106 return -ENOENT;
106} 107}
107#endif 108#endif
109int xchk_setup_fscounters(struct xfs_scrub *sc, struct xfs_inode *ip);
108 110
109void xchk_ag_free(struct xfs_scrub *sc, struct xchk_ag *sa); 111void xchk_ag_free(struct xfs_scrub *sc, struct xchk_ag *sa);
110int xchk_ag_init(struct xfs_scrub *sc, xfs_agnumber_t agno, 112int xchk_ag_init(struct xfs_scrub *sc, xfs_agnumber_t agno,
diff --git a/fs/xfs/scrub/fscounters.c b/fs/xfs/scrub/fscounters.c
new file mode 100644
index 000000000000..07c11e3e6437
--- /dev/null
+++ b/fs/xfs/scrub/fscounters.c
@@ -0,0 +1,366 @@
1// SPDX-License-Identifier: GPL-2.0+
2/*
3 * Copyright (C) 2019 Oracle. All Rights Reserved.
4 * Author: Darrick J. Wong <darrick.wong@oracle.com>
5 */
6#include "xfs.h"
7#include "xfs_fs.h"
8#include "xfs_shared.h"
9#include "xfs_format.h"
10#include "xfs_trans_resv.h"
11#include "xfs_mount.h"
12#include "xfs_defer.h"
13#include "xfs_btree.h"
14#include "xfs_bit.h"
15#include "xfs_log_format.h"
16#include "xfs_trans.h"
17#include "xfs_sb.h"
18#include "xfs_inode.h"
19#include "xfs_alloc.h"
20#include "xfs_ialloc.h"
21#include "xfs_rmap.h"
22#include "xfs_error.h"
23#include "xfs_errortag.h"
24#include "xfs_icache.h"
25#include "xfs_health.h"
26#include "xfs_bmap.h"
27#include "scrub/xfs_scrub.h"
28#include "scrub/scrub.h"
29#include "scrub/common.h"
30#include "scrub/trace.h"
31
32/*
33 * FS Summary Counters
34 * ===================
35 *
36 * The basics of filesystem summary counter checking are that we iterate the
37 * AGs counting the number of free blocks, free space btree blocks, per-AG
38 * reservations, inodes, delayed allocation reservations, and free inodes.
39 * Then we compare what we computed against the in-core counters.
40 *
41 * However, the reality is that summary counters are a tricky beast to check.
42 * While we /could/ freeze the filesystem and scramble around the AGs counting
 43 * the free blocks, in practice we prefer not to do that for a scan because
44 * freezing is costly. To get around this, we added a per-cpu counter of the
45 * delalloc reservations so that we can rotor around the AGs relatively
46 * quickly, and we allow the counts to be slightly off because we're not taking
47 * any locks while we do this.
48 *
49 * So the first thing we do is warm up the buffer cache in the setup routine by
50 * walking all the AGs to make sure the incore per-AG structure has been
51 * initialized. The expected value calculation then iterates the incore per-AG
52 * structures as quickly as it can. We snapshot the percpu counters before and
53 * after this operation and use the difference in counter values to guess at
54 * our tolerance for mismatch between expected and actual counter values.
55 */
56
57/*
58 * Since the expected value computation is lockless but only browses incore
59 * values, the percpu counters should be fairly close to each other. However,
60 * we'll allow ourselves to be off by at least this (arbitrary) amount.
61 */
62#define XCHK_FSCOUNT_MIN_VARIANCE (512)
63
64/*
65 * Make sure the per-AG structure has been initialized from the on-disk header
66 * contents and trust that the incore counters match the ondisk counters. (The
67 * AGF and AGI scrubbers check them, and a normal xfs_scrub run checks the
68 * summary counters after checking all AG headers). Do this from the setup
69 * function so that the inner AG aggregation loop runs as quickly as possible.
70 *
71 * This function runs during the setup phase /before/ we start checking any
72 * metadata.
73 */
STATIC int
xchk_fscount_warmup(
	struct xfs_scrub	*sc)
{
	struct xfs_mount	*mp = sc->mp;
	struct xfs_buf		*agi_bp = NULL;
	struct xfs_buf		*agf_bp = NULL;
	struct xfs_perag	*pag = NULL;
	xfs_agnumber_t		agno;
	int			error = 0;

	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
		pag = xfs_perag_get(mp, agno);

		/* Incore state already initialized by an earlier read? */
		if (pag->pagi_init && pag->pagf_init)
			goto next_loop_perag;

		/* Lock both AG headers. */
		error = xfs_ialloc_read_agi(mp, sc->tp, agno, &agi_bp);
		if (error)
			break;
		error = xfs_alloc_read_agf(mp, sc->tp, agno, 0, &agf_bp);
		if (error)
			break;
		error = -ENOMEM;
		if (!agf_bp || !agi_bp)
			break;

		/*
		 * These are supposed to be initialized by the header read
		 * function.
		 */
		error = -EFSCORRUPTED;
		if (!pag->pagi_init || !pag->pagf_init)
			break;

		/*
		 * The reads above were only needed to populate the incore
		 * per-AG state; drop the buffers now that that's done.
		 */
		xfs_buf_relse(agf_bp);
		agf_bp = NULL;
		xfs_buf_relse(agi_bp);
		agi_bp = NULL;
next_loop_perag:
		xfs_perag_put(pag);
		pag = NULL;
		/*
		 * Clear the speculative error codes (-ENOMEM/-EFSCORRUPTED)
		 * assigned above now that this AG completed successfully.
		 */
		error = 0;

		if (fatal_signal_pending(current))
			break;
	}

	/* Release anything still held if we broke out of the loop early. */
	if (agf_bp)
		xfs_buf_relse(agf_bp);
	if (agi_bp)
		xfs_buf_relse(agi_bp);
	if (pag)
		xfs_perag_put(pag);
	return error;
}
131
/*
 * Set up a summary-counter scrub: allocate the tracking structure, compute
 * the valid icount range, warm up the incore per-AG state, and pause
 * background reclaim before allocating the scrub transaction.
 */
int
xchk_setup_fscounters(
	struct xfs_scrub	*sc,
	struct xfs_inode	*ip)
{
	struct xchk_fscounters	*fsc;
	int			error;

	/* NOTE(review): assumes the scrub core frees sc->buf at teardown. */
	sc->buf = kmem_zalloc(sizeof(struct xchk_fscounters), KM_SLEEP);
	if (!sc->buf)
		return -ENOMEM;
	fsc = sc->buf;

	/* Precompute the valid range of icount values for this fs. */
	xfs_icount_range(sc->mp, &fsc->icount_min, &fsc->icount_max);

	/* We must get the incore counters set up before we can proceed. */
	error = xchk_fscount_warmup(sc);
	if (error)
		return error;

	/*
	 * Pause background reclaim while we're scrubbing to reduce the
	 * likelihood of background perturbations to the counters throwing off
	 * our calculations.
	 */
	xchk_stop_reaping(sc);

	return xchk_trans_alloc(sc, 0);
}
161
162/*
163 * Calculate what the global in-core counters ought to be from the incore
164 * per-AG structure. Callers can compare this to the actual in-core counters
165 * to estimate by how much both in-core and on-disk counters need to be
166 * adjusted.
167 */
168STATIC int
169xchk_fscount_aggregate_agcounts(
170 struct xfs_scrub *sc,
171 struct xchk_fscounters *fsc)
172{
173 struct xfs_mount *mp = sc->mp;
174 struct xfs_perag *pag;
175 uint64_t delayed;
176 xfs_agnumber_t agno;
177 int tries = 8;
178
179retry:
180 fsc->icount = 0;
181 fsc->ifree = 0;
182 fsc->fdblocks = 0;
183
184 for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
185 pag = xfs_perag_get(mp, agno);
186
187 /* This somehow got unset since the warmup? */
188 if (!pag->pagi_init || !pag->pagf_init) {
189 xfs_perag_put(pag);
190 return -EFSCORRUPTED;
191 }
192
193 /* Count all the inodes */
194 fsc->icount += pag->pagi_count;
195 fsc->ifree += pag->pagi_freecount;
196
197 /* Add up the free/freelist/bnobt/cntbt blocks */
198 fsc->fdblocks += pag->pagf_freeblks;
199 fsc->fdblocks += pag->pagf_flcount;
200 fsc->fdblocks += pag->pagf_btreeblks;
201
202 /*
203 * Per-AG reservations are taken out of the incore counters,
204 * so they must be left out of the free blocks computation.
205 */
206 fsc->fdblocks -= pag->pag_meta_resv.ar_reserved;
207 fsc->fdblocks -= pag->pag_rmapbt_resv.ar_orig_reserved;
208
209 xfs_perag_put(pag);
210
211 if (fatal_signal_pending(current))
212 break;
213 }
214
215 /*
216 * The global incore space reservation is taken from the incore
217 * counters, so leave that out of the computation.
218 */
219 fsc->fdblocks -= mp->m_resblks_avail;
220
221 /*
222 * Delayed allocation reservations are taken out of the incore counters
223 * but not recorded on disk, so leave them and their indlen blocks out
224 * of the computation.
225 */
226 delayed = percpu_counter_sum(&mp->m_delalloc_blks);
227 fsc->fdblocks -= delayed;
228
229 trace_xchk_fscounters_calc(mp, fsc->icount, fsc->ifree, fsc->fdblocks,
230 delayed);
231
232
233 /* Bail out if the values we compute are totally nonsense. */
234 if (fsc->icount < fsc->icount_min || fsc->icount > fsc->icount_max ||
235 fsc->fdblocks > mp->m_sb.sb_dblocks ||
236 fsc->ifree > fsc->icount_max)
237 return -EFSCORRUPTED;
238
239 /*
240 * If ifree > icount then we probably had some perturbation in the
241 * counters while we were calculating things. We'll try a few times
242 * to maintain ifree <= icount before giving up.
243 */
244 if (fsc->ifree > fsc->icount) {
245 if (tries--)
246 goto retry;
247 xchk_set_incomplete(sc);
248 return 0;
249 }
250
251 return 0;
252}
253
254/*
255 * Is the @counter reasonably close to the @expected value?
256 *
257 * We neither locked nor froze anything in the filesystem while aggregating the
258 * per-AG data to compute the @expected value, which means that the counter
259 * could have changed. We know the @old_value of the summation of the counter
260 * before the aggregation, and we re-sum the counter now. If the expected
261 * value falls between the two summations, we're ok.
262 *
263 * Otherwise, we /might/ have a problem. If the change in the summations is
264 * more than we want to tolerate, the filesystem is probably busy and we should
265 * just send back INCOMPLETE and see if userspace will try again.
266 */
267static inline bool
268xchk_fscount_within_range(
269 struct xfs_scrub *sc,
270 const int64_t old_value,
271 struct percpu_counter *counter,
272 uint64_t expected)
273{
274 int64_t min_value, max_value;
275 int64_t curr_value = percpu_counter_sum(counter);
276
277 trace_xchk_fscounters_within_range(sc->mp, expected, curr_value,
278 old_value);
279
280 /* Negative values are always wrong. */
281 if (curr_value < 0)
282 return false;
283
284 /* Exact matches are always ok. */
285 if (curr_value == expected)
286 return true;
287
288 min_value = min(old_value, curr_value);
289 max_value = max(old_value, curr_value);
290
291 /* Within the before-and-after range is ok. */
292 if (expected >= min_value && expected <= max_value)
293 return true;
294
295 /*
296 * If the difference between the two summations is too large, the fs
297 * might just be busy and so we'll mark the scrub incomplete. Return
298 * true here so that we don't mark the counter corrupt.
299 *
300 * XXX: In the future when userspace can grant scrub permission to
301 * quiesce the filesystem to solve the outsized variance problem, this
302 * check should be moved up and the return code changed to signal to
303 * userspace that we need quiesce permission.
304 */
305 if (max_value - min_value >= XCHK_FSCOUNT_MIN_VARIANCE) {
306 xchk_set_incomplete(sc);
307 return true;
308 }
309
310 return false;
311}
312
/* Check the superblock counters. */
int
xchk_fscounters(
	struct xfs_scrub	*sc)
{
	struct xfs_mount	*mp = sc->mp;
	struct xchk_fscounters	*fsc = sc->buf;
	int64_t			icount, ifree, fdblocks;
	int			error;

	/*
	 * Snapshot the percpu counters before the aggregation pass; these
	 * serve as the "before" summations for xchk_fscount_within_range().
	 */
	icount = percpu_counter_sum(&mp->m_icount);
	ifree = percpu_counter_sum(&mp->m_ifree);
	fdblocks = percpu_counter_sum(&mp->m_fdblocks);

	/*
	 * The sanity checks below keep going after flagging corruption so
	 * that every obviously-bad counter is reported, not just the first.
	 */

	/* No negative values, please! */
	if (icount < 0 || ifree < 0 || fdblocks < 0)
		xchk_set_corrupt(sc);

	/* See if icount is obviously wrong. */
	if (icount < fsc->icount_min || icount > fsc->icount_max)
		xchk_set_corrupt(sc);

	/* See if fdblocks is obviously wrong. */
	if (fdblocks > mp->m_sb.sb_dblocks)
		xchk_set_corrupt(sc);

	/*
	 * If ifree exceeds icount by more than the minimum variance then
	 * something's probably wrong with the counters.
	 */
	if (ifree > icount && ifree - icount > XCHK_FSCOUNT_MIN_VARIANCE)
		xchk_set_corrupt(sc);

	/* Walk the incore AG headers to calculate the expected counters. */
	error = xchk_fscount_aggregate_agcounts(sc, fsc);
	if (!xchk_process_error(sc, 0, XFS_SB_BLOCK(mp), &error))
		return error;
	/* Aggregation couldn't get a stable snapshot; don't judge. */
	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_INCOMPLETE)
		return 0;

	/* Compare the in-core counters with whatever we counted. */
	if (!xchk_fscount_within_range(sc, icount, &mp->m_icount, fsc->icount))
		xchk_set_corrupt(sc);

	if (!xchk_fscount_within_range(sc, ifree, &mp->m_ifree, fsc->ifree))
		xchk_set_corrupt(sc);

	if (!xchk_fscount_within_range(sc, fdblocks, &mp->m_fdblocks,
			fsc->fdblocks))
		xchk_set_corrupt(sc);

	return 0;
}
diff --git a/fs/xfs/scrub/health.c b/fs/xfs/scrub/health.c
index 16b536aa125e..23cf8e2f25db 100644
--- a/fs/xfs/scrub/health.c
+++ b/fs/xfs/scrub/health.c
@@ -109,6 +109,7 @@ static const struct xchk_health_map type_to_health_flag[XFS_SCRUB_TYPE_NR] = {
109 [XFS_SCRUB_TYPE_UQUOTA] = { XHG_FS, XFS_SICK_FS_UQUOTA }, 109 [XFS_SCRUB_TYPE_UQUOTA] = { XHG_FS, XFS_SICK_FS_UQUOTA },
110 [XFS_SCRUB_TYPE_GQUOTA] = { XHG_FS, XFS_SICK_FS_GQUOTA }, 110 [XFS_SCRUB_TYPE_GQUOTA] = { XHG_FS, XFS_SICK_FS_GQUOTA },
111 [XFS_SCRUB_TYPE_PQUOTA] = { XHG_FS, XFS_SICK_FS_PQUOTA }, 111 [XFS_SCRUB_TYPE_PQUOTA] = { XHG_FS, XFS_SICK_FS_PQUOTA },
112 [XFS_SCRUB_TYPE_FSCOUNTERS] = { XHG_FS, XFS_SICK_FS_COUNTERS },
112}; 113};
113 114
114/* Return the health status mask for this scrub type. */ 115/* Return the health status mask for this scrub type. */
diff --git a/fs/xfs/scrub/scrub.c b/fs/xfs/scrub/scrub.c
index ce13c1c366db..f630389ee176 100644
--- a/fs/xfs/scrub/scrub.c
+++ b/fs/xfs/scrub/scrub.c
@@ -352,6 +352,12 @@ static const struct xchk_meta_ops meta_scrub_ops[] = {
352 .scrub = xchk_quota, 352 .scrub = xchk_quota,
353 .repair = xrep_notsupported, 353 .repair = xrep_notsupported,
354 }, 354 },
355 [XFS_SCRUB_TYPE_FSCOUNTERS] = { /* fs summary counters */
356 .type = ST_FS,
357 .setup = xchk_setup_fscounters,
358 .scrub = xchk_fscounters,
359 .repair = xrep_notsupported,
360 },
355}; 361};
356 362
357/* This isn't a stable feature, warn once per day. */ 363/* This isn't a stable feature, warn once per day. */
diff --git a/fs/xfs/scrub/scrub.h b/fs/xfs/scrub/scrub.h
index 01986ed364db..ad1ceb44a628 100644
--- a/fs/xfs/scrub/scrub.h
+++ b/fs/xfs/scrub/scrub.h
@@ -127,6 +127,7 @@ xchk_quota(struct xfs_scrub *sc)
127 return -ENOENT; 127 return -ENOENT;
128} 128}
129#endif 129#endif
130int xchk_fscounters(struct xfs_scrub *sc);
130 131
131/* cross-referencing helpers */ 132/* cross-referencing helpers */
132void xchk_xref_is_used_space(struct xfs_scrub *sc, xfs_agblock_t agbno, 133void xchk_xref_is_used_space(struct xfs_scrub *sc, xfs_agblock_t agbno,
@@ -152,4 +153,12 @@ void xchk_xref_is_used_rt_space(struct xfs_scrub *sc, xfs_rtblock_t rtbno,
152# define xchk_xref_is_used_rt_space(sc, rtbno, len) do { } while (0) 153# define xchk_xref_is_used_rt_space(sc, rtbno, len) do { } while (0)
153#endif 154#endif
154 155
156struct xchk_fscounters {
157 uint64_t icount;
158 uint64_t ifree;
159 uint64_t fdblocks;
160 unsigned long long icount_min;
161 unsigned long long icount_max;
162};
163
155#endif /* __XFS_SCRUB_SCRUB_H__ */ 164#endif /* __XFS_SCRUB_SCRUB_H__ */
diff --git a/fs/xfs/scrub/trace.h b/fs/xfs/scrub/trace.h
index 3c83e8b3b39c..3362bae28b46 100644
--- a/fs/xfs/scrub/trace.h
+++ b/fs/xfs/scrub/trace.h
@@ -50,6 +50,7 @@ TRACE_DEFINE_ENUM(XFS_SCRUB_TYPE_RTSUM);
50TRACE_DEFINE_ENUM(XFS_SCRUB_TYPE_UQUOTA); 50TRACE_DEFINE_ENUM(XFS_SCRUB_TYPE_UQUOTA);
51TRACE_DEFINE_ENUM(XFS_SCRUB_TYPE_GQUOTA); 51TRACE_DEFINE_ENUM(XFS_SCRUB_TYPE_GQUOTA);
52TRACE_DEFINE_ENUM(XFS_SCRUB_TYPE_PQUOTA); 52TRACE_DEFINE_ENUM(XFS_SCRUB_TYPE_PQUOTA);
53TRACE_DEFINE_ENUM(XFS_SCRUB_TYPE_FSCOUNTERS);
53 54
54#define XFS_SCRUB_TYPE_STRINGS \ 55#define XFS_SCRUB_TYPE_STRINGS \
55 { XFS_SCRUB_TYPE_PROBE, "probe" }, \ 56 { XFS_SCRUB_TYPE_PROBE, "probe" }, \
@@ -75,7 +76,8 @@ TRACE_DEFINE_ENUM(XFS_SCRUB_TYPE_PQUOTA);
75 { XFS_SCRUB_TYPE_RTSUM, "rtsummary" }, \ 76 { XFS_SCRUB_TYPE_RTSUM, "rtsummary" }, \
76 { XFS_SCRUB_TYPE_UQUOTA, "usrquota" }, \ 77 { XFS_SCRUB_TYPE_UQUOTA, "usrquota" }, \
77 { XFS_SCRUB_TYPE_GQUOTA, "grpquota" }, \ 78 { XFS_SCRUB_TYPE_GQUOTA, "grpquota" }, \
78 { XFS_SCRUB_TYPE_PQUOTA, "prjquota" } 79 { XFS_SCRUB_TYPE_PQUOTA, "prjquota" }, \
80 { XFS_SCRUB_TYPE_FSCOUNTERS, "fscounters" }
79 81
80DECLARE_EVENT_CLASS(xchk_class, 82DECLARE_EVENT_CLASS(xchk_class,
81 TP_PROTO(struct xfs_inode *ip, struct xfs_scrub_metadata *sm, 83 TP_PROTO(struct xfs_inode *ip, struct xfs_scrub_metadata *sm,
@@ -223,6 +225,7 @@ DEFINE_EVENT(xchk_block_error_class, name, \
223 void *ret_ip), \ 225 void *ret_ip), \
224 TP_ARGS(sc, daddr, ret_ip)) 226 TP_ARGS(sc, daddr, ret_ip))
225 227
228DEFINE_SCRUB_BLOCK_ERROR_EVENT(xchk_fs_error);
226DEFINE_SCRUB_BLOCK_ERROR_EVENT(xchk_block_error); 229DEFINE_SCRUB_BLOCK_ERROR_EVENT(xchk_block_error);
227DEFINE_SCRUB_BLOCK_ERROR_EVENT(xchk_block_preen); 230DEFINE_SCRUB_BLOCK_ERROR_EVENT(xchk_block_preen);
228 231
@@ -590,6 +593,64 @@ TRACE_EVENT(xchk_iallocbt_check_cluster,
590 __entry->cluster_ino) 593 __entry->cluster_ino)
591) 594)
592 595
596TRACE_EVENT(xchk_fscounters_calc,
597 TP_PROTO(struct xfs_mount *mp, uint64_t icount, uint64_t ifree,
598 uint64_t fdblocks, uint64_t delalloc),
599 TP_ARGS(mp, icount, ifree, fdblocks, delalloc),
600 TP_STRUCT__entry(
601 __field(dev_t, dev)
602 __field(int64_t, icount_sb)
603 __field(uint64_t, icount_calculated)
604 __field(int64_t, ifree_sb)
605 __field(uint64_t, ifree_calculated)
606 __field(int64_t, fdblocks_sb)
607 __field(uint64_t, fdblocks_calculated)
608 __field(uint64_t, delalloc)
609 ),
610 TP_fast_assign(
611 __entry->dev = mp->m_super->s_dev;
612 __entry->icount_sb = mp->m_sb.sb_icount;
613 __entry->icount_calculated = icount;
614 __entry->ifree_sb = mp->m_sb.sb_ifree;
615 __entry->ifree_calculated = ifree;
616 __entry->fdblocks_sb = mp->m_sb.sb_fdblocks;
617 __entry->fdblocks_calculated = fdblocks;
618 __entry->delalloc = delalloc;
619 ),
620 TP_printk("dev %d:%d icount %lld:%llu ifree %lld::%llu fdblocks %lld::%llu delalloc %llu",
621 MAJOR(__entry->dev), MINOR(__entry->dev),
622 __entry->icount_sb,
623 __entry->icount_calculated,
624 __entry->ifree_sb,
625 __entry->ifree_calculated,
626 __entry->fdblocks_sb,
627 __entry->fdblocks_calculated,
628 __entry->delalloc)
629)
630
/*
 * Report a counter comparison: the @expected value derived from the per-AG
 * headers versus the percpu summations taken before (@old_value) and after
 * (@curr_value) the expected-value calculation.
 */
TRACE_EVENT(xchk_fscounters_within_range,
	TP_PROTO(struct xfs_mount *mp, uint64_t expected, int64_t curr_value,
		 int64_t old_value),
	TP_ARGS(mp, expected, curr_value, old_value),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(uint64_t, expected)
		__field(int64_t, curr_value)
		__field(int64_t, old_value)
	),
	TP_fast_assign(
		__entry->dev = mp->m_super->s_dev;
		__entry->expected = expected;
		__entry->curr_value = curr_value;
		__entry->old_value = old_value;
	),
	TP_printk("dev %d:%d expected %llu curr_value %lld old_value %lld",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->expected,
		  __entry->curr_value,
		  __entry->old_value)
)
653
593/* repair tracepoints */ 654/* repair tracepoints */
594#if IS_ENABLED(CONFIG_XFS_ONLINE_REPAIR) 655#if IS_ENABLED(CONFIG_XFS_ONLINE_REPAIR)
595 656