// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2023 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <djwong@kernel.org>
 */
#include "xfs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_sysfs.h"
#include "xfs_btree.h"
#include "xfs_super.h"
#include "scrub/scrub.h"
#include "scrub/stats.h"
#include "scrub/trace.h"
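
/*
 * Scrub statistics: a set of counters for each scrub type, kept once
 * globally and once per mount, and exported to userspace through debugfs.
 */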
struct xchk_scrub_stats {
	/* all 32-bit counters here */
	uint32_t		invocations;
	uint32_t		clean;
	uint32_t		corrupt;
	uint32_t		preen;
	uint32_t		xfail;
	uint32_t		xcorrupt;
	uint32_t		incomplete;
	uint32_t		warning;
	uint32_t		retries;
	uint32_t		repair_invocations;
	uint32_t		repair_success;

	/* all 64-bit items here */
	uint64_t		checktime_us;
	uint64_t		repairtime_us;

	/* non-counter state must go at the end for clearall */
	spinlock_t		css_lock;
};

struct xchk_stats {
	struct dentry		*cs_debugfs;
	struct xchk_scrub_stats	cs_stats[XFS_SCRUB_TYPE_NR];
};
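
/*
 * Layout note: because css_lock is the last member of xchk_scrub_stats,
 * clearing the counters is a single memset() of everything up to
 * offsetof(struct xchk_scrub_stats, css_lock), as xchk_stats_clearall()
 * does below.
 */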
static struct xchk_stats	global_stats;
static const char *name_map[XFS_SCRUB_TYPE_NR] = {
	[XFS_SCRUB_TYPE_SB]		= "sb",
	[XFS_SCRUB_TYPE_AGF]		= "agf",
	[XFS_SCRUB_TYPE_AGFL]		= "agfl",
	[XFS_SCRUB_TYPE_AGI]		= "agi",
	[XFS_SCRUB_TYPE_BNOBT]		= "bnobt",
	[XFS_SCRUB_TYPE_CNTBT]		= "cntbt",
	[XFS_SCRUB_TYPE_INOBT]		= "inobt",
	[XFS_SCRUB_TYPE_FINOBT]	= "finobt",
	[XFS_SCRUB_TYPE_RMAPBT]	= "rmapbt",
	[XFS_SCRUB_TYPE_REFCNTBT]	= "refcountbt",
	[XFS_SCRUB_TYPE_INODE]		= "inode",
	[XFS_SCRUB_TYPE_BMBTD]		= "bmapbtd",
	[XFS_SCRUB_TYPE_BMBTA]		= "bmapbta",
	[XFS_SCRUB_TYPE_BMBTC]		= "bmapbtc",
	[XFS_SCRUB_TYPE_DIR]		= "directory",
	[XFS_SCRUB_TYPE_XATTR]		= "xattr",
	[XFS_SCRUB_TYPE_SYMLINK]	= "symlink",
	[XFS_SCRUB_TYPE_PARENT]	= "parent",
	[XFS_SCRUB_TYPE_RTBITMAP]	= "rtbitmap",
	[XFS_SCRUB_TYPE_RTSUM]		= "rtsummary",
	[XFS_SCRUB_TYPE_UQUOTA]	= "usrquota",
	[XFS_SCRUB_TYPE_GQUOTA]	= "grpquota",
	[XFS_SCRUB_TYPE_PQUOTA]	= "prjquota",
	[XFS_SCRUB_TYPE_FSCOUNTERS]	= "fscounters",
	[XFS_SCRUB_TYPE_QUOTACHECK]	= "quotacheck",
	[XFS_SCRUB_TYPE_NLINKS]	= "nlinks",
	[XFS_SCRUB_TYPE_DIRTREE]	= "dirtree",
	[XFS_SCRUB_TYPE_METAPATH]	= "metapath",
	[XFS_SCRUB_TYPE_RGSUPER]	= "rgsuper",
};
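
/*
 * Each named type above becomes one line in the debugfs "stats" file, in
 * the order given by the format string in xchk_stats_format() below,
 * e.g. (counter values are illustrative):
 *
 *	agf 10 9 1 0 0 0 0 0 2 480 1 1 920
 *
 * i.e. name, then invocations clean corrupt preen xfail xcorrupt
 * incomplete warning retries checktime_us repair_invocations
 * repair_success repairtime_us.
 */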
/* Format the scrub stats into a text buffer, similar to pcp style. */
STATIC ssize_t
xchk_stats_format(
	struct xchk_stats	*cs,
	char			*buf,
	size_t			remaining)
{
	struct xchk_scrub_stats	*css = &cs->cs_stats[0];
	unsigned int		i;
	ssize_t			copied = 0;
	int			ret = 0;

	for (i = 0; i < XFS_SCRUB_TYPE_NR; i++, css++) {
		/* Skip array slots with no name; they are not reported. */
		if (!name_map[i])
			continue;

		ret = scnprintf(buf, remaining,
				"%s %u %u %u %u %u %u %u %u %u %llu %u %u %llu\n",
				name_map[i],
				(unsigned int)css->invocations,
				(unsigned int)css->clean,
				(unsigned int)css->corrupt,
				(unsigned int)css->preen,
				(unsigned int)css->xfail,
				(unsigned int)css->xcorrupt,
				(unsigned int)css->incomplete,
				(unsigned int)css->warning,
				(unsigned int)css->retries,
				(unsigned long long)css->checktime_us,
				(unsigned int)css->repair_invocations,
				(unsigned int)css->repair_success,
				(unsigned long long)css->repairtime_us);
		if (ret <= 0)
			break;

		remaining -= ret;
		copied += ret;
		buf += ret;
	}

	return copied > 0 ? copied : ret;
}
/* Estimate the worst case buffer size required to hold the whole report. */
STATIC size_t
xchk_stats_estimate_bufsize(
	struct xchk_stats	*cs)
{
	struct xchk_scrub_stats	*css = &cs->cs_stats[0];
	unsigned int		i;
	size_t			field_width;
	size_t			ret = 0;

	/* "4294967295" plus one space for each u32 field */
	field_width = 11 * (offsetof(struct xchk_scrub_stats, checktime_us) /
			    sizeof(uint32_t));

	/* "18446744073709551615" plus one space for each u64 field */
	field_width += 21 * ((offsetof(struct xchk_scrub_stats, css_lock) -
			      offsetof(struct xchk_scrub_stats, checktime_us)) /
			     sizeof(uint64_t));

	for (i = 0; i < XFS_SCRUB_TYPE_NR; i++, css++) {
		if (!name_map[i])
			continue;

		/* name plus one space */
		ret += 1 + strlen(name_map[i]);

		/* all fields, plus newline */
		ret += field_width + 1;
	}

	return ret;
}
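
/*
 * Worked example of the estimate above: dividing the offset of
 * checktime_us by sizeof(uint32_t) counts any alignment padding before
 * the first u64 as extra u32 slots, which only inflates the worst-case
 * estimate.  With 11 u32 counters (plus, on common ABIs, 4 padding bytes
 * before checktime_us) and 2 u64 counters, field_width is
 * 12 * 11 + 2 * 21 = 174 bytes, and each named type contributes
 * strlen(name) + 1 + field_width + 1 bytes to the total.
 */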
/* Clear all counters. */
STATIC void
xchk_stats_clearall(
	struct xchk_stats	*cs)
{
	struct xchk_scrub_stats	*css = &cs->cs_stats[0];
	unsigned int		i;

	for (i = 0; i < XFS_SCRUB_TYPE_NR; i++, css++) {
		spin_lock(&css->css_lock);
		memset(css, 0, offsetof(struct xchk_scrub_stats, css_lock));
		spin_unlock(&css->css_lock);
	}
}
#define XFS_SCRUB_OFLAG_UNCLEAN	(XFS_SCRUB_OFLAG_CORRUPT | \
				 XFS_SCRUB_OFLAG_PREEN | \
				 XFS_SCRUB_OFLAG_XFAIL | \
				 XFS_SCRUB_OFLAG_XCORRUPT | \
				 XFS_SCRUB_OFLAG_INCOMPLETE | \
				 XFS_SCRUB_OFLAG_WARNING)
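
/*
 * A scrub run that sets none of the flags above counts as "clean" in
 * xchk_stats_merge_one() below.
 */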
STATIC void
xchk_stats_merge_one(
	struct xchk_stats		*cs,
	const struct xfs_scrub_metadata	*sm,
	const struct xchk_stats_run	*run)
{
	struct xchk_scrub_stats		*css;

	if (sm->sm_type >= XFS_SCRUB_TYPE_NR) {
		ASSERT(sm->sm_type < XFS_SCRUB_TYPE_NR);
		return;
	}

	css = &cs->cs_stats[sm->sm_type];
	spin_lock(&css->css_lock);
	css->invocations++;
	if (!(sm->sm_flags & XFS_SCRUB_OFLAG_UNCLEAN))
		css->clean++;
	if (sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		css->corrupt++;
	if (sm->sm_flags & XFS_SCRUB_OFLAG_PREEN)
		css->preen++;
	if (sm->sm_flags & XFS_SCRUB_OFLAG_XFAIL)
		css->xfail++;
	if (sm->sm_flags & XFS_SCRUB_OFLAG_XCORRUPT)
		css->xcorrupt++;
	if (sm->sm_flags & XFS_SCRUB_OFLAG_INCOMPLETE)
		css->incomplete++;
	if (sm->sm_flags & XFS_SCRUB_OFLAG_WARNING)
		css->warning++;
	css->retries += run->retries;
	css->checktime_us += howmany_64(run->scrub_ns, NSEC_PER_USEC);

	if (run->repair_attempted)
		css->repair_invocations++;
	if (run->repair_succeeded)
		css->repair_success++;
	css->repairtime_us += howmany_64(run->repair_ns, NSEC_PER_USEC);
	spin_unlock(&css->css_lock);
}
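
/*
 * Note: css_lock serializes the writers (merges and clearall); the
 * debugfs reader in xchk_scrub_stats_read() formats the counters without
 * taking the lock, so a snapshot taken mid-update may mix old and new
 * counter values.
 */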
/* Merge these scrub-run stats into the global and mount stat data. */
void
xchk_stats_merge(
	struct xfs_mount		*mp,
	const struct xfs_scrub_metadata	*sm,
	const struct xchk_stats_run	*run)
{
	xchk_stats_merge_one(&global_stats, sm, run);
	xchk_stats_merge_one(mp->m_scrub_stats, sm, run);
}
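
/*
 * Sketch of a typical caller (illustrative only; the real caller lives in
 * the scrub dispatch code).  Only the xchk_stats_run fields consumed in
 * this file are shown, and the local variable names are placeholders:
 *
 *	struct xchk_stats_run	run = {
 *		.scrub_ns		= scrub_time_ns,
 *		.retries		= tries - 1,
 *		.repair_attempted	= attempted_repair,
 *		.repair_succeeded	= fixed,
 *		.repair_ns		= repair_time_ns,
 *	};
 *
 *	xchk_stats_merge(mp, sm, &run);
 */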
/* debugfs boilerplate */

STATIC ssize_t
xchk_scrub_stats_read(
	struct file		*file,
	char __user		*ubuf,
	size_t			count,
	loff_t			*ppos)
{
	struct xchk_stats	*cs = file->private_data;
	char			*buf;
	size_t			bufsize;
	ssize_t			avail, ret;

	/*
	 * This generates a text snapshot of all the scrub counters, so we
	 * do not want userspace to receive garbled text from multiple calls.
	 * If the file position is greater than 0, return a short read.
	 */
	if (*ppos > 0)
		return 0;

	bufsize = xchk_stats_estimate_bufsize(cs);

	buf = kvmalloc(bufsize, XCHK_GFP_FLAGS);
	if (!buf)
		return -ENOMEM;

	avail = xchk_stats_format(cs, buf, bufsize);
	if (avail < 0) {
		ret = avail;
		goto out;
	}

	ret = simple_read_from_buffer(ubuf, count, ppos, buf, avail);
out:
	kvfree(buf);
	return ret;
}
static const struct file_operations scrub_stats_fops = {
	.open			= simple_open,
	.read			= xchk_scrub_stats_read,
};
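
/*
 * Example read (assuming the standard debugfs mount point and the global
 * stats registered under the xfs debugfs root):
 *
 *	# cat /sys/kernel/debug/xfs/scrub/stats
 *
 * prints one line per named scrub type.
 */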
STATIC ssize_t
xchk_clear_scrub_stats_write(
	struct file		*file,
	const char __user	*ubuf,
	size_t			count,
	loff_t			*ppos)
{
	struct xchk_stats	*cs = file->private_data;
	unsigned int		val;
	int			ret;

	ret = kstrtouint_from_user(ubuf, count, 0, &val);
	if (ret)
		return ret;

	if (val != 1)
		return -EINVAL;

	xchk_stats_clearall(cs);
	return count;
}
static const struct file_operations clear_scrub_stats_fops = {
	.open			= simple_open,
	.write			= xchk_clear_scrub_stats_write,
};
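
/*
 * Example (path assumptions as above): writing "1", the only accepted
 * value, resets every counter:
 *
 *	# echo 1 > /sys/kernel/debug/xfs/scrub/clear_stats
 */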
/* Initialize the stats object. */
STATIC int
xchk_stats_init(
	struct xchk_stats	*cs,
	struct xfs_mount	*mp)
{
	struct xchk_scrub_stats	*css = &cs->cs_stats[0];
	unsigned int		i;

	for (i = 0; i < XFS_SCRUB_TYPE_NR; i++, css++)
		spin_lock_init(&css->css_lock);

	return 0;
}
/* Connect the stats object to debugfs. */
void
xchk_stats_register(
	struct xchk_stats	*cs,
	struct dentry		*parent)
{
	if (!parent)
		return;

	cs->cs_debugfs = xfs_debugfs_mkdir("scrub", parent);
	if (!cs->cs_debugfs)
		return;

	debugfs_create_file("stats", 0444, cs->cs_debugfs, cs,
			&scrub_stats_fops);
	debugfs_create_file("clear_stats", 0200, cs->cs_debugfs, cs,
			&clear_scrub_stats_fops);
}
/* Free all resources related to the stats object. */
STATIC void
xchk_stats_teardown(
	struct xchk_stats	*cs)
{
	/* Nothing to free; the counters are embedded in *cs. */
}
/* Disconnect the stats object from debugfs. */
void
xchk_stats_unregister(
	struct xchk_stats	*cs)
{
	debugfs_remove(cs->cs_debugfs);
}
/* Initialize global stats and register them. */
int __init
xchk_global_stats_setup(
	struct dentry		*parent)
{
	int			error;

	error = xchk_stats_init(&global_stats, NULL);
	if (error)
		return error;

	xchk_stats_register(&global_stats, parent);
	return 0;
}
/* Unregister global stats and tear them down. */
void
xchk_global_stats_teardown(void)
{
	xchk_stats_unregister(&global_stats);
	xchk_stats_teardown(&global_stats);
}
/* Allocate per-mount stats. */
int
xchk_mount_stats_alloc(
	struct xfs_mount	*mp)
{
	struct xchk_stats	*cs;
	int			error;

	cs = kvzalloc(sizeof(struct xchk_stats), GFP_KERNEL);
	if (!cs)
		return -ENOMEM;
	error = xchk_stats_init(cs, mp);
	if (error)
		goto out_free;

	mp->m_scrub_stats = cs;
	return 0;

out_free:
	kvfree(cs);
	return error;
}
/* Free per-mount stats. */
void
xchk_mount_stats_free(
	struct xfs_mount	*mp)
{
	xchk_stats_teardown(mp->m_scrub_stats);
	kvfree(mp->m_scrub_stats);
	mp->m_scrub_stats = NULL;
}