// SPDX-License-Identifier: GPL-2.0
/*
 * bcache sysfs interfaces
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "sysfs.h"
#include "btree.h"
#include "request.h"
#include "writeback.h"

#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/sched/clock.h>
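/*
 * Everything declared below becomes a file under sysfs. Illustrative
 * paths (device names are examples, not fixed): a cached device's
 * attributes appear under /sys/block/bcache0/bcache/ (and via the
 * backing device's /sys/block/<bdev>/bcache/), a cache set's under
 * /sys/fs/bcache/<set-uuid>/, and a cache device's under the "bcache"
 * directory of its block device.
 */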
/* Default is 0 ("writethrough") */
static const char * const bch_cache_modes[] = {
	"writethrough",
	"writeback",
	"writearound",
	"none",
	NULL
};

/* Default is 0 ("auto") */
static const char * const bch_stop_on_failure_modes[] = {
	"auto",
	"always",
	NULL
};

static const char * const cache_replacement_policies[] = {
	"lru",
	"fifo",
	"random",
	NULL
};

static const char * const error_actions[] = {
	"unregister",
	"panic",
	NULL
};
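/*
 * These lists must stay NULL-terminated: bch_snprint_string_list()
 * below walks them until it hits NULL, and __sysfs_match_string() is
 * called with a length of -1, which likewise relies on the sentinel.
 */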
write_attribute(attach);
write_attribute(detach);
write_attribute(unregister);
write_attribute(stop);
write_attribute(clear_stats);
write_attribute(trigger_gc);
write_attribute(prune_cache);
write_attribute(flash_vol_create);

read_attribute(bucket_size);
read_attribute(block_size);
read_attribute(nbuckets);
read_attribute(tree_depth);
read_attribute(root_usage_percent);
read_attribute(priority_stats);
read_attribute(btree_cache_size);
read_attribute(btree_cache_max_chain);
read_attribute(cache_available_percent);
read_attribute(written);
read_attribute(btree_written);
read_attribute(metadata_written);
read_attribute(active_journal_entries);
read_attribute(backing_dev_name);
read_attribute(backing_dev_uuid);

sysfs_time_stats_attribute(btree_gc, sec, ms);
sysfs_time_stats_attribute(btree_split, sec, us);
sysfs_time_stats_attribute(btree_sort, ms, us);
sysfs_time_stats_attribute(btree_read, ms, us);

read_attribute(btree_nodes);
read_attribute(btree_used_percent);
read_attribute(average_key_size);
read_attribute(dirty_data);
read_attribute(bset_tree_stats);

read_attribute(state);
read_attribute(cache_read_races);
read_attribute(reclaim);
read_attribute(flush_write);
read_attribute(retry_flush_write);
read_attribute(writeback_keys_done);
read_attribute(writeback_keys_failed);
read_attribute(io_errors);
read_attribute(congested);
read_attribute(cutoff_writeback);
read_attribute(cutoff_writeback_sync);
rw_attribute(congested_read_threshold_us);
rw_attribute(congested_write_threshold_us);

rw_attribute(sequential_cutoff);
rw_attribute(data_csum);
rw_attribute(cache_mode);
rw_attribute(stop_when_cache_set_failed);
rw_attribute(writeback_metadata);
rw_attribute(writeback_running);
rw_attribute(writeback_percent);
rw_attribute(writeback_delay);
rw_attribute(writeback_rate);

rw_attribute(writeback_rate_update_seconds);
rw_attribute(writeback_rate_i_term_inverse);
rw_attribute(writeback_rate_p_term_inverse);
rw_attribute(writeback_rate_minimum);
read_attribute(writeback_rate_debug);

read_attribute(stripe_size);
read_attribute(partial_stripes_expensive);

rw_attribute(synchronous);
rw_attribute(journal_delay_ms);
rw_attribute(io_disable);
rw_attribute(discard);
rw_attribute(running);
rw_attribute(label);
rw_attribute(readahead);
rw_attribute(errors);
rw_attribute(io_error_limit);
rw_attribute(io_error_halflife);
rw_attribute(verify);
rw_attribute(bypass_torture_test);
rw_attribute(key_merging_disabled);
rw_attribute(gc_always_rewrite);
rw_attribute(expensive_debug_checks);
rw_attribute(cache_replacement_policy);
rw_attribute(btree_shrinker_disabled);
rw_attribute(copy_gc_enabled);
rw_attribute(gc_after_writeback);
rw_attribute(size);
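/*
 * The *_attribute() helpers come from sysfs.h; roughly (paraphrased,
 * not the verbatim macro text), each expands to a bare attribute with
 * the obvious permission bits:
 *
 *	#define read_attribute(n)				\
 *		static struct attribute sysfs_##n =		\
 *			{ .name = #n, .mode = 0444 }
 *
 * write_attribute() uses 0200 and rw_attribute() 0644; the shared
 * show()/store() methods below then dispatch on the attribute's
 * address rather than keeping one callback per file.
 */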
static ssize_t bch_snprint_string_list(char *buf,
				       size_t size,
				       const char * const list[],
				       size_t selected)
{
	char *out = buf;
	size_t i;

	for (i = 0; list[i]; i++)
		out += snprintf(out, buf + size - out,
				i == selected ? "[%s] " : "%s ", list[i]);

	out[-1] = '\n';
	return out - buf;
}
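/*
 * Example: with bch_cache_modes and selected == 1 the buffer reads
 * "writethrough [writeback] writearound none\n" -- brackets mark the
 * currently selected entry, and the final trailing space is
 * overwritten with the newline.
 */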
SHOW(__bch_cached_dev)
{
	struct cached_dev *dc = container_of(kobj, struct cached_dev,
					     disk.kobj);
	char const *states[] = { "no cache", "clean", "dirty", "inconsistent" };
	int wb = dc->writeback_running;

#define var(stat)		(dc->stat)

	if (attr == &sysfs_cache_mode)
		return bch_snprint_string_list(buf, PAGE_SIZE,
					       bch_cache_modes,
					       BDEV_CACHE_MODE(&dc->sb));

	if (attr == &sysfs_stop_when_cache_set_failed)
		return bch_snprint_string_list(buf, PAGE_SIZE,
					       bch_stop_on_failure_modes,
					       dc->stop_when_cache_set_failed);

	sysfs_printf(data_csum,		"%i", dc->disk.data_csum);
	var_printf(verify,		"%i");
	var_printf(bypass_torture_test,	"%i");
	var_printf(writeback_metadata,	"%i");
	var_printf(writeback_running,	"%i");
	var_print(writeback_delay);
	var_print(writeback_percent);
	sysfs_hprint(writeback_rate,
		     wb ? atomic_long_read(&dc->writeback_rate.rate) << 9 : 0);
	sysfs_hprint(io_errors,		atomic_read(&dc->io_errors));
	sysfs_printf(io_error_limit,	"%i", dc->error_limit);
	sysfs_printf(io_disable,	"%i", dc->io_disable);
	var_print(writeback_rate_update_seconds);
	var_print(writeback_rate_i_term_inverse);
	var_print(writeback_rate_p_term_inverse);
	var_print(writeback_rate_minimum);
	if (attr == &sysfs_writeback_rate_debug) {
		char rate[20];
		char dirty[20];
		char target[20];
		char proportional[20];
		char integral[20];
		char change[20];
		s64 next_io;

		/*
		 * Except for dirty and target, the other values should
		 * be 0 if writeback is not running.
		 */
		bch_hprint(rate,
			   wb ? atomic_long_read(&dc->writeback_rate.rate) << 9
			      : 0);
		bch_hprint(dirty, bcache_dev_sectors_dirty(&dc->disk) << 9);
		bch_hprint(target, dc->writeback_rate_target << 9);
		bch_hprint(proportional,
			   wb ? dc->writeback_rate_proportional << 9 : 0);
		bch_hprint(integral,
			   wb ? dc->writeback_rate_integral_scaled << 9 : 0);
		bch_hprint(change, wb ? dc->writeback_rate_change << 9 : 0);
		next_io = wb ? div64_s64(dc->writeback_rate.next - local_clock(),
					 NSEC_PER_MSEC)
			     : 0;

		return sprintf(buf,
			       "rate:\t\t%s/sec\n"
			       "dirty:\t\t%s\n"
			       "target:\t\t%s\n"
			       "proportional:\t%s\n"
			       "integral:\t%s\n"
			       "change:\t\t%s/sec\n"
			       "next io:\t%llims\n",
			       rate, dirty, target, proportional,
			       integral, change, next_io);
	}
	sysfs_hprint(dirty_data,
		     bcache_dev_sectors_dirty(&dc->disk) << 9);

	sysfs_hprint(stripe_size,	((uint64_t)dc->disk.stripe_size) << 9);
	var_printf(partial_stripes_expensive,	"%u");

	var_hprint(sequential_cutoff);
	var_hprint(readahead);

	sysfs_print(running,		atomic_read(&dc->running));
	sysfs_print(state,		states[BDEV_STATE(&dc->sb)]);

	if (attr == &sysfs_label) {
		memcpy(buf, dc->sb.label, SB_LABEL_SIZE);
		buf[SB_LABEL_SIZE] = '\0';
		strcat(buf, "\n");
		return strlen(buf);
	}

	if (attr == &sysfs_backing_dev_name) {
		snprintf(buf, BDEVNAME_SIZE + 1, "%s", dc->backing_dev_name);
		strcat(buf, "\n");
		return strlen(buf);
	}

	if (attr == &sysfs_backing_dev_uuid) {
		/* convert binary uuid into 36-byte string plus '\0' */
		snprintf(buf, 36 + 1, "%pU", dc->sb.uuid);
		strcat(buf, "\n");
		return strlen(buf);
	}

#undef var
	return 0;
}
SHOW_LOCKED(bch_cached_dev)
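/*
 * SHOW_LOCKED()/STORE_LOCKED() (sysfs.h) wrap the __-prefixed worker
 * in a bch_register_lock mutex_lock()/mutex_unlock() pair, so the
 * unlocked workers in this file may assume the register lock is held.
 */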
STORE(__cached_dev)
{
	struct cached_dev *dc = container_of(kobj, struct cached_dev,
					     disk.kobj);
	ssize_t v;
	struct cache_set *c;
	struct kobj_uevent_env *env;

#define d_strtoul(var)		sysfs_strtoul(var, dc->var)
#define d_strtoul_nonzero(var)	sysfs_strtoul_clamp(var, dc->var, 1, INT_MAX)
#define d_strtoi_h(var)		sysfs_hatoi(var, dc->var)

	sysfs_strtoul(data_csum,	dc->disk.data_csum);
	sysfs_strtoul_bool(verify,	dc->verify);
	sysfs_strtoul_bool(bypass_torture_test, dc->bypass_torture_test);
	sysfs_strtoul_bool(writeback_metadata, dc->writeback_metadata);
	sysfs_strtoul_bool(writeback_running, dc->writeback_running);
	sysfs_strtoul_clamp(writeback_delay, dc->writeback_delay, 0, UINT_MAX);

	sysfs_strtoul_clamp(writeback_percent, dc->writeback_percent,
			    0, bch_cutoff_writeback);
	if (attr == &sysfs_writeback_rate) {
		ssize_t ret;
		long int v = atomic_long_read(&dc->writeback_rate.rate);

		ret = strtoul_safe_clamp(buf, v, 1, INT_MAX);

		if (!ret) {
			atomic_long_set(&dc->writeback_rate.rate, v);
			ret = size;
		}

		return ret;
	}
	sysfs_strtoul_clamp(writeback_rate_update_seconds,
			    dc->writeback_rate_update_seconds,
			    1, WRITEBACK_RATE_UPDATE_SECS_MAX);
	sysfs_strtoul_clamp(writeback_rate_i_term_inverse,
			    dc->writeback_rate_i_term_inverse,
			    1, UINT_MAX);
	sysfs_strtoul_clamp(writeback_rate_p_term_inverse,
			    dc->writeback_rate_p_term_inverse,
			    1, UINT_MAX);
	sysfs_strtoul_clamp(writeback_rate_minimum,
			    dc->writeback_rate_minimum,
			    1, UINT_MAX);

	sysfs_strtoul_clamp(io_error_limit, dc->error_limit, 0, INT_MAX);

	if (attr == &sysfs_io_disable) {
		int v = strtoul_or_return(buf);

		dc->io_disable = v ? 1 : 0;
	}

	sysfs_strtoul_clamp(sequential_cutoff,
			    dc->sequential_cutoff,
			    0, UINT_MAX);
	d_strtoi_h(readahead);
	if (attr == &sysfs_clear_stats)
		bch_cache_accounting_clear(&dc->accounting);

	if (attr == &sysfs_running &&
	    strtoul_or_return(buf))
		bch_cached_dev_run(dc);

	if (attr == &sysfs_cache_mode) {
		v = __sysfs_match_string(bch_cache_modes, -1, buf);
		if (v < 0)
			return v;

		if ((unsigned int) v != BDEV_CACHE_MODE(&dc->sb)) {
			SET_BDEV_CACHE_MODE(&dc->sb, v);
			bch_write_bdev_super(dc, NULL);
		}
	}

	if (attr == &sysfs_stop_when_cache_set_failed) {
		v = __sysfs_match_string(bch_stop_on_failure_modes, -1, buf);
		if (v < 0)
			return v;

		dc->stop_when_cache_set_failed = v;
	}
	if (attr == &sysfs_label) {
		if (size > SB_LABEL_SIZE)
			return -EINVAL;
		memcpy(dc->sb.label, buf, size);
		if (size < SB_LABEL_SIZE)
			dc->sb.label[size] = '\0';
		if (size && dc->sb.label[size - 1] == '\n')
			dc->sb.label[size - 1] = '\0';
		bch_write_bdev_super(dc, NULL);
		if (dc->disk.c) {
			memcpy(dc->disk.c->uuids[dc->disk.id].label,
			       buf, SB_LABEL_SIZE);
			bch_uuid_write(dc->disk.c);
		}
		env = kzalloc(sizeof(struct kobj_uevent_env), GFP_KERNEL);
		if (!env)
			return -ENOMEM;
		add_uevent_var(env, "DRIVER=bcache");
		add_uevent_var(env, "CACHED_UUID=%pU", dc->sb.uuid);
		add_uevent_var(env, "CACHED_LABEL=%s", buf);
		kobject_uevent_env(&disk_to_dev(dc->disk.disk)->kobj,
				   KOBJ_CHANGE,
				   env->envp);
		kfree(env);
	}
	if (attr == &sysfs_attach) {
		uint8_t set_uuid[16];

		if (bch_parse_uuid(buf, set_uuid) < 16)
			return -EINVAL;

		v = -ENOENT;
		list_for_each_entry(c, &bch_cache_sets, list) {
			v = bch_cached_dev_attach(dc, c, set_uuid);
			if (!v)
				return size;
		}
		if (v == -ENOENT)
			pr_err("Can't attach %s: cache set not found", buf);
		return v;
	}

	if (attr == &sysfs_detach && dc->disk.c)
		bch_cached_dev_detach(dc);

	if (attr == &sysfs_stop)
		bcache_device_stop(&dc->disk);

	return size;
}
STORE(bch_cached_dev)
{
	struct cached_dev *dc = container_of(kobj, struct cached_dev,
					     disk.kobj);

	mutex_lock(&bch_register_lock);
	size = __cached_dev_store(kobj, attr, buf, size);

	if (attr == &sysfs_writeback_running) {
		/* dc->writeback_running changed in __cached_dev_store() */
		if (IS_ERR_OR_NULL(dc->writeback_thread)) {
			/*
			 * Reject setting it to 1 via sysfs if the
			 * writeback kthread is not created yet.
			 */
			if (dc->writeback_running) {
				dc->writeback_running = false;
				pr_err("%s: failed to run non-existent writeback thread",
				       dc->disk.disk->disk_name);
			}
		} else
			/*
			 * The writeback kthread will check whether
			 * dc->writeback_running is true or false.
			 */
			bch_writeback_queue(dc);
	}

	if (attr == &sysfs_writeback_percent)
		if (!test_and_set_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags))
			schedule_delayed_work(&dc->writeback_rate_update,
					dc->writeback_rate_update_seconds * HZ);

	mutex_unlock(&bch_register_lock);
	return size;
}
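/*
 * Example usage from userspace (device names are illustrative):
 *
 *	echo writeback > /sys/block/bcache0/bcache/cache_mode
 *	echo 10 > /sys/block/bcache0/bcache/writeback_percent
 *	cat /sys/block/bcache0/bcache/writeback_rate_debug
 */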
static struct attribute *bch_cached_dev_files[] = {
	&sysfs_attach,
	&sysfs_detach,
	&sysfs_stop,
#if 0
	&sysfs_data_csum,
#endif
	&sysfs_cache_mode,
	&sysfs_stop_when_cache_set_failed,
	&sysfs_writeback_metadata,
	&sysfs_writeback_running,
	&sysfs_writeback_delay,
	&sysfs_writeback_percent,
	&sysfs_writeback_rate,
	&sysfs_writeback_rate_update_seconds,
	&sysfs_writeback_rate_i_term_inverse,
	&sysfs_writeback_rate_p_term_inverse,
	&sysfs_writeback_rate_minimum,
	&sysfs_writeback_rate_debug,
	&sysfs_io_errors,
	&sysfs_io_error_limit,
	&sysfs_io_disable,
	&sysfs_dirty_data,
	&sysfs_stripe_size,
	&sysfs_partial_stripes_expensive,
	&sysfs_sequential_cutoff,
	&sysfs_clear_stats,
	&sysfs_running,
	&sysfs_state,
	&sysfs_label,
	&sysfs_readahead,
#ifdef CONFIG_BCACHE_DEBUG
	&sysfs_verify,
	&sysfs_bypass_torture_test,
#endif
	&sysfs_backing_dev_name,
	&sysfs_backing_dev_uuid,
	NULL
};
KTYPE(bch_cached_dev);
SHOW(__bch_flash_dev)
{
	struct bcache_device *d = container_of(kobj, struct bcache_device,
					       kobj);
	struct uuid_entry *u = &d->c->uuids[d->id];

	sysfs_printf(data_csum,	"%i", d->data_csum);
	sysfs_hprint(size,	u->sectors << 9);

	if (attr == &sysfs_label) {
		memcpy(buf, u->label, SB_LABEL_SIZE);
		buf[SB_LABEL_SIZE] = '\0';
		strcat(buf, "\n");
		return strlen(buf);
	}

	return 0;
}
SHOW_LOCKED(bch_flash_dev)
STORE(__bch_flash_dev)
{
	struct bcache_device *d = container_of(kobj, struct bcache_device,
					       kobj);
	struct uuid_entry *u = &d->c->uuids[d->id];

	sysfs_strtoul(data_csum,	d->data_csum);

	if (attr == &sysfs_size) {
		uint64_t v;

		strtoi_h_or_return(buf, v);

		u->sectors = v >> 9;
		bch_uuid_write(d->c);
		set_capacity(d->disk, u->sectors);
	}

	if (attr == &sysfs_label) {
		memcpy(u->label, buf, SB_LABEL_SIZE);
		bch_uuid_write(d->c);
	}

	if (attr == &sysfs_unregister) {
		set_bit(BCACHE_DEV_DETACHING, &d->flags);
		bcache_device_stop(d);
	}

	return size;
}
STORE_LOCKED(bch_flash_dev)
static struct attribute *bch_flash_dev_files[] = {
	&sysfs_unregister,
#if 0
	&sysfs_data_csum,
#endif
	&sysfs_label,
	&sysfs_size,
	NULL
};
KTYPE(bch_flash_dev);
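/*
 * Flash-only volumes are created by writing a size to the cache set's
 * flash_vol_create attribute (handled in __bch_cache_set's store
 * below), e.g. "echo 100G > /sys/fs/bcache/<set-uuid>/flash_vol_create"
 * (illustrative path); the resulting bcacheN device then exposes the
 * attributes above.
 */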
struct bset_stats_op {
	struct btree_op op;
	size_t nodes;
	struct bset_stats stats;
};

static int bch_btree_bset_stats(struct btree_op *b_op, struct btree *b)
{
	struct bset_stats_op *op = container_of(b_op, struct bset_stats_op, op);

	op->nodes++;
	bch_btree_keys_stats(&b->keys, &op->stats);

	return MAP_CONTINUE;
}

static int bch_bset_print_stats(struct cache_set *c, char *buf)
{
	struct bset_stats_op op;
	int ret;

	memset(&op, 0, sizeof(op));
	bch_btree_op_init(&op.op, -1);

	ret = bch_btree_map_nodes(&op.op, c, &ZERO_KEY, bch_btree_bset_stats);
	if (ret < 0)
		return ret;

	return snprintf(buf, PAGE_SIZE,
			"btree nodes:		%zu\n"
			"written sets:		%zu\n"
			"unwritten sets:		%zu\n"
			"written key bytes:	%zu\n"
			"unwritten key bytes:	%zu\n"
			"floats:			%zu\n"
			"failed:			%zu\n",
			op.nodes,
			op.stats.sets_written, op.stats.sets_unwritten,
			op.stats.bytes_written, op.stats.bytes_unwritten,
			op.stats.floats, op.stats.failed);
}
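/*
 * bch_root_usage() below reports how full the btree root node is: it
 * walks the root's keys (retrying if the root changes under it, hence
 * the lock_root loop) and returns live key bytes as a percentage of
 * the node's capacity.
 */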
static unsigned int bch_root_usage(struct cache_set *c)
{
	unsigned int bytes = 0;
	struct bkey *k;
	struct btree *b;
	struct btree_iter iter;

	goto lock_root;

	do {
		rw_unlock(false, b);
lock_root:
		b = c->root;
		rw_lock(false, b, b->level);
	} while (b != c->root);

	for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
		bytes += bkey_bytes(k);

	rw_unlock(false, b);

	return (bytes * 100) / btree_bytes(c);
}
static size_t bch_cache_size(struct cache_set *c)
{
	size_t ret = 0;
	struct btree *b;

	mutex_lock(&c->bucket_lock);
	list_for_each_entry(b, &c->btree_cache, list)
		ret += 1 << (b->keys.page_order + PAGE_SHIFT);

	mutex_unlock(&c->bucket_lock);
	return ret;
}
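/*
 * bch_cache_max_chain() below reports the longest collision chain in
 * the in-memory btree node hash table -- a cheap health indicator for
 * bucket_hash's distribution.
 */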
static unsigned int bch_cache_max_chain(struct cache_set *c)
{
	unsigned int ret = 0;
	struct hlist_head *h;

	mutex_lock(&c->bucket_lock);

	for (h = c->bucket_hash;
	     h < c->bucket_hash + (1 << BUCKET_HASH_BITS);
	     h++) {
		unsigned int i = 0;
		struct hlist_node *p;

		hlist_for_each(p, h)
			i++;

		ret = max(ret, i);
	}

	mutex_unlock(&c->bucket_lock);
	return ret;
}
static unsigned int bch_btree_used(struct cache_set *c)
{
	return div64_u64(c->gc_stats.key_bytes * 100,
			 (c->gc_stats.nodes ?: 1) * btree_bytes(c));
}

static unsigned int bch_average_key_size(struct cache_set *c)
{
	return c->gc_stats.nkeys
		? div64_u64(c->gc_stats.data, c->gc_stats.nkeys)
		: 0;
}
SHOW(__bch_cache_set)
{
	struct cache_set *c = container_of(kobj, struct cache_set, kobj);

	sysfs_print(synchronous,	CACHE_SYNC(&c->sb));
	sysfs_print(journal_delay_ms,	c->journal_delay_ms);
	sysfs_hprint(bucket_size,	bucket_bytes(c));
	sysfs_hprint(block_size,	block_bytes(c));
	sysfs_print(tree_depth,		c->root->level);
	sysfs_print(root_usage_percent,	bch_root_usage(c));

	sysfs_hprint(btree_cache_size,	bch_cache_size(c));
	sysfs_print(btree_cache_max_chain,	bch_cache_max_chain(c));
	sysfs_print(cache_available_percent,	100 - c->gc_stats.in_use);

	sysfs_print_time_stats(&c->btree_gc_time,	btree_gc, sec, ms);
	sysfs_print_time_stats(&c->btree_split_time,	btree_split, sec, us);
	sysfs_print_time_stats(&c->sort.time,		btree_sort, ms, us);
	sysfs_print_time_stats(&c->btree_read_time,	btree_read, ms, us);

	sysfs_print(btree_used_percent,	bch_btree_used(c));
	sysfs_print(btree_nodes,	c->gc_stats.nodes);
	sysfs_hprint(average_key_size,	bch_average_key_size(c));

	sysfs_print(cache_read_races,
		    atomic_long_read(&c->cache_read_races));

	sysfs_print(reclaim,
		    atomic_long_read(&c->reclaim));

	sysfs_print(flush_write,
		    atomic_long_read(&c->flush_write));

	sysfs_print(retry_flush_write,
		    atomic_long_read(&c->retry_flush_write));

	sysfs_print(writeback_keys_done,
		    atomic_long_read(&c->writeback_keys_done));
	sysfs_print(writeback_keys_failed,
		    atomic_long_read(&c->writeback_keys_failed));

	if (attr == &sysfs_errors)
		return bch_snprint_string_list(buf, PAGE_SIZE, error_actions,
					       c->on_error);

	/* See count_io_errors for why 88 */
	sysfs_print(io_error_halflife,	c->error_decay * 88);
	sysfs_print(io_error_limit,	c->error_limit);

	sysfs_hprint(congested,
		     ((uint64_t) bch_get_congested(c)) << 9);
	sysfs_print(congested_read_threshold_us,
		    c->congested_read_threshold_us);
	sysfs_print(congested_write_threshold_us,
		    c->congested_write_threshold_us);

	sysfs_print(cutoff_writeback, bch_cutoff_writeback);
	sysfs_print(cutoff_writeback_sync, bch_cutoff_writeback_sync);

	sysfs_print(active_journal_entries,	fifo_used(&c->journal.pin));
	sysfs_printf(verify,			"%i", c->verify);
	sysfs_printf(key_merging_disabled,	"%i", c->key_merging_disabled);
	sysfs_printf(expensive_debug_checks,
		     "%i", c->expensive_debug_checks);
	sysfs_printf(gc_always_rewrite,		"%i", c->gc_always_rewrite);
	sysfs_printf(btree_shrinker_disabled,	"%i", c->shrinker_disabled);
	sysfs_printf(copy_gc_enabled,		"%i", c->copy_gc_enabled);
	sysfs_printf(gc_after_writeback,	"%i", c->gc_after_writeback);
	sysfs_printf(io_disable,		"%i",
		     test_bit(CACHE_SET_IO_DISABLE, &c->flags));

	if (attr == &sysfs_bset_tree_stats)
		return bch_bset_print_stats(c, buf);

	return 0;
}
SHOW_LOCKED(bch_cache_set)
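/*
 * Example (illustrative paths): the values above are read from the
 * cache set directory, e.g.
 *
 *	cat /sys/fs/bcache/<set-uuid>/cache_available_percent
 *	cat /sys/fs/bcache/<set-uuid>/internal/btree_cache_max_chain
 */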
STORE(__bch_cache_set)
{
	struct cache_set *c = container_of(kobj, struct cache_set, kobj);
	ssize_t v;

	if (attr == &sysfs_unregister)
		bch_cache_set_unregister(c);

	if (attr == &sysfs_stop)
		bch_cache_set_stop(c);

	if (attr == &sysfs_synchronous) {
		bool sync = strtoul_or_return(buf);

		if (sync != CACHE_SYNC(&c->sb)) {
			SET_CACHE_SYNC(&c->sb, sync);
			bcache_write_super(c);
		}
	}

	if (attr == &sysfs_flash_vol_create) {
		int r;
		uint64_t v;

		strtoi_h_or_return(buf, v);

		r = bch_flash_dev_create(c, v);
		if (r)
			return r;
	}

	if (attr == &sysfs_clear_stats) {
		atomic_long_set(&c->writeback_keys_done, 0);
		atomic_long_set(&c->writeback_keys_failed, 0);

		memset(&c->gc_stats, 0, sizeof(struct gc_stat));
		bch_cache_accounting_clear(&c->accounting);
	}

	if (attr == &sysfs_trigger_gc)
		force_wake_up_gc(c);

	if (attr == &sysfs_prune_cache) {
		struct shrink_control sc;

		sc.gfp_mask = GFP_KERNEL;
		sc.nr_to_scan = strtoul_or_return(buf);
		c->shrink.scan_objects(&c->shrink, &sc);
	}

	sysfs_strtoul_clamp(congested_read_threshold_us,
			    c->congested_read_threshold_us,
			    0, UINT_MAX);
	sysfs_strtoul_clamp(congested_write_threshold_us,
			    c->congested_write_threshold_us,
			    0, UINT_MAX);
	if (attr == &sysfs_errors) {
		v = __sysfs_match_string(error_actions, -1, buf);
		if (v < 0)
			return v;

		c->on_error = v;
	}

	sysfs_strtoul_clamp(io_error_limit, c->error_limit, 0, UINT_MAX);

	/* See count_io_errors() for why 88 */
	if (attr == &sysfs_io_error_halflife) {
		unsigned long v = 0;
		ssize_t ret;

		ret = strtoul_safe_clamp(buf, v, 0, UINT_MAX);
		if (!ret) {
			c->error_decay = v / 88;
			ret = size;
		}
		return ret;
	}

	if (attr == &sysfs_io_disable) {
		v = strtoul_or_return(buf);
		if (v) {
			if (test_and_set_bit(CACHE_SET_IO_DISABLE,
					     &c->flags))
				pr_warn("CACHE_SET_IO_DISABLE already set");
		} else {
			if (!test_and_clear_bit(CACHE_SET_IO_DISABLE,
						&c->flags))
				pr_warn("CACHE_SET_IO_DISABLE already cleared");
		}
	}

	sysfs_strtoul_clamp(journal_delay_ms,
			    c->journal_delay_ms,
			    0, USHRT_MAX);
	sysfs_strtoul_bool(verify,	c->verify);
	sysfs_strtoul_bool(key_merging_disabled, c->key_merging_disabled);
	sysfs_strtoul(expensive_debug_checks, c->expensive_debug_checks);
	sysfs_strtoul_bool(gc_always_rewrite, c->gc_always_rewrite);
	sysfs_strtoul_bool(btree_shrinker_disabled, c->shrinker_disabled);
	sysfs_strtoul_bool(copy_gc_enabled, c->copy_gc_enabled);
	/*
	 * Writing gc_after_writeback here may overwrite an already set
	 * BCH_DO_AUTO_GC; that's fine, the flag will simply be set
	 * again at the next opportunity.
	 */
	sysfs_strtoul_clamp(gc_after_writeback, c->gc_after_writeback, 0, 1);

	return size;
}
STORE_LOCKED(bch_cache_set)
SHOW(bch_cache_set_internal)
{
	struct cache_set *c = container_of(kobj, struct cache_set, internal);

	return bch_cache_set_show(&c->kobj, attr, buf);
}

STORE(bch_cache_set_internal)
{
	struct cache_set *c = container_of(kobj, struct cache_set, internal);

	return bch_cache_set_store(&c->kobj, attr, buf, size);
}

static void bch_cache_set_internal_release(struct kobject *k)
{
}

static struct attribute *bch_cache_set_files[] = {
	&sysfs_unregister,
	&sysfs_stop,
	&sysfs_synchronous,
	&sysfs_journal_delay_ms,
	&sysfs_flash_vol_create,

	&sysfs_bucket_size,
	&sysfs_block_size,
	&sysfs_tree_depth,
	&sysfs_root_usage_percent,
	&sysfs_btree_cache_size,
	&sysfs_cache_available_percent,

	&sysfs_average_key_size,

	&sysfs_errors,
	&sysfs_io_error_limit,
	&sysfs_io_error_halflife,
	&sysfs_congested,
	&sysfs_congested_read_threshold_us,
	&sysfs_congested_write_threshold_us,
	&sysfs_clear_stats,
	NULL
};
KTYPE(bch_cache_set);
static struct attribute *bch_cache_set_internal_files[] = {
	&sysfs_active_journal_entries,

	sysfs_time_stats_attribute_list(btree_gc, sec, ms)
	sysfs_time_stats_attribute_list(btree_split, sec, us)
	sysfs_time_stats_attribute_list(btree_sort, ms, us)
	sysfs_time_stats_attribute_list(btree_read, ms, us)

	&sysfs_btree_nodes,
	&sysfs_btree_used_percent,
	&sysfs_btree_cache_max_chain,

	&sysfs_bset_tree_stats,
	&sysfs_cache_read_races,
	&sysfs_reclaim,
	&sysfs_flush_write,
	&sysfs_retry_flush_write,
	&sysfs_writeback_keys_done,
	&sysfs_writeback_keys_failed,

	&sysfs_trigger_gc,
	&sysfs_prune_cache,
#ifdef CONFIG_BCACHE_DEBUG
	&sysfs_verify,
	&sysfs_key_merging_disabled,
	&sysfs_expensive_debug_checks,
#endif
	&sysfs_gc_always_rewrite,
	&sysfs_btree_shrinker_disabled,
	&sysfs_copy_gc_enabled,
	&sysfs_gc_after_writeback,
	&sysfs_io_disable,
	&sysfs_cutoff_writeback,
	&sysfs_cutoff_writeback_sync,
	NULL
};
KTYPE(bch_cache_set_internal);
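/*
 * Comparator for sort() in the priority_stats read below: note the
 * reversed operands, so bucket priorities are sorted in descending
 * order (highest priority first).
 */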
static int __bch_cache_cmp(const void *l, const void *r)
{
	return *((uint16_t *)r) - *((uint16_t *)l);
}
SHOW(__bch_cache)
{
	struct cache *ca = container_of(kobj, struct cache, kobj);

	sysfs_hprint(bucket_size,	bucket_bytes(ca));
	sysfs_hprint(block_size,	block_bytes(ca));
	sysfs_print(nbuckets,		ca->sb.nbuckets);
	sysfs_print(discard,		ca->discard);
	sysfs_hprint(written, atomic_long_read(&ca->sectors_written) << 9);
	sysfs_hprint(btree_written,
		     atomic_long_read(&ca->btree_sectors_written) << 9);
	sysfs_hprint(metadata_written,
		     (atomic_long_read(&ca->meta_sectors_written) +
		      atomic_long_read(&ca->btree_sectors_written)) << 9);

	sysfs_print(io_errors,
		    atomic_read(&ca->io_errors) >> IO_ERROR_SHIFT);

	if (attr == &sysfs_cache_replacement_policy)
		return bch_snprint_string_list(buf, PAGE_SIZE,
					       cache_replacement_policies,
					       CACHE_REPLACEMENT(&ca->sb));
	if (attr == &sysfs_priority_stats) {
		struct bucket *b;
		size_t n = ca->sb.nbuckets, i;
		size_t unused = 0, available = 0, dirty = 0, meta = 0;
		uint64_t sum = 0;
		/* Compute 31 quantiles */
		uint16_t q[31], *p, *cached;
		ssize_t ret;

		cached = p = vmalloc(array_size(sizeof(uint16_t),
						ca->sb.nbuckets));
		if (!p)
			return -ENOMEM;

		mutex_lock(&ca->set->bucket_lock);
		for_each_bucket(b, ca) {
			if (!GC_SECTORS_USED(b))
				unused++;
			if (GC_MARK(b) == GC_MARK_RECLAIMABLE)
				available++;
			if (GC_MARK(b) == GC_MARK_DIRTY)
				dirty++;
			if (GC_MARK(b) == GC_MARK_METADATA)
				meta++;
		}

		for (i = ca->sb.first_bucket; i < n; i++)
			p[i] = ca->buckets[i].prio;
		mutex_unlock(&ca->set->bucket_lock);
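		/*
		 * What follows: sort priorities in descending order,
		 * drop zero-priority (unused) buckets from the tail,
		 * skip BTREE_PRIO (metadata) buckets at the front,
		 * then report the average and 31 evenly spaced
		 * quantiles of INITIAL_PRIO - prio over the remaining
		 * cached-data buckets.
		 */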
		sort(p, n, sizeof(uint16_t), __bch_cache_cmp, NULL);

		while (n &&
		       !cached[n - 1])
			--n;

		unused = ca->sb.nbuckets - n;

		while (cached < p + n &&
		       *cached == BTREE_PRIO)
			cached++, n--;

		for (i = 0; i < n; i++)
			sum += INITIAL_PRIO - cached[i];

		if (n)
			do_div(sum, n);

		for (i = 0; i < ARRAY_SIZE(q); i++)
			q[i] = INITIAL_PRIO - cached[n * (i + 1) /
				(ARRAY_SIZE(q) + 1)];

		vfree(p);

		ret = scnprintf(buf, PAGE_SIZE,
				"Unused:		%zu%%\n"
				"Clean:		%zu%%\n"
				"Dirty:		%zu%%\n"
				"Metadata:	%zu%%\n"
				"Average:	%llu\n"
				"Sectors per Q:	%zu\n"
				"Quantiles:	[",
				unused * 100 / (size_t) ca->sb.nbuckets,
				available * 100 / (size_t) ca->sb.nbuckets,
				dirty * 100 / (size_t) ca->sb.nbuckets,
				meta * 100 / (size_t) ca->sb.nbuckets, sum,
				n * ca->sb.bucket_size / (ARRAY_SIZE(q) + 1));

		for (i = 0; i < ARRAY_SIZE(q); i++)
			ret += scnprintf(buf + ret, PAGE_SIZE - ret,
					 "%u ", q[i]);
		ret--;
		ret += scnprintf(buf + ret, PAGE_SIZE - ret, "]\n");

		return ret;
	}

	return 0;
}
SHOW_LOCKED(bch_cache)
STORE(__bch_cache)
{
	struct cache *ca = container_of(kobj, struct cache, kobj);
	ssize_t v;

	if (attr == &sysfs_discard) {
		bool v = strtoul_or_return(buf);

		if (blk_queue_discard(bdev_get_queue(ca->bdev)))
			ca->discard = v;

		if (v != CACHE_DISCARD(&ca->sb)) {
			SET_CACHE_DISCARD(&ca->sb, v);
			bcache_write_super(ca->set);
		}
	}

	if (attr == &sysfs_cache_replacement_policy) {
		v = __sysfs_match_string(cache_replacement_policies, -1, buf);
		if (v < 0)
			return v;

		if ((unsigned int) v != CACHE_REPLACEMENT(&ca->sb)) {
			mutex_lock(&ca->set->bucket_lock);
			SET_CACHE_REPLACEMENT(&ca->sb, v);
			mutex_unlock(&ca->set->bucket_lock);

			bcache_write_super(ca->set);
		}
	}

	if (attr == &sysfs_clear_stats) {
		atomic_long_set(&ca->sectors_written, 0);
		atomic_long_set(&ca->btree_sectors_written, 0);
		atomic_long_set(&ca->meta_sectors_written, 0);
		atomic_set(&ca->io_count, 0);
		atomic_set(&ca->io_errors, 0);
	}

	return size;
}
STORE_LOCKED(bch_cache)
static struct attribute *bch_cache_files[] = {
	&sysfs_bucket_size,
	&sysfs_block_size,
	&sysfs_nbuckets,
	&sysfs_priority_stats,
	&sysfs_discard,
	&sysfs_written,
	&sysfs_btree_written,
	&sysfs_metadata_written,
	&sysfs_io_errors,
	&sysfs_clear_stats,
	&sysfs_cache_replacement_policy,
	NULL
};
KTYPE(bch_cache);
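/*
 * Example (illustrative): per-cache-device statistics can be read via
 * the cache's kobject, e.g. through the cacheN symlink in the cache
 * set directory:
 *
 *	cat /sys/fs/bcache/<set-uuid>/cache0/priority_stats
 */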