1 // SPDX-License-Identifier: GPL-2.0
3 * DAMON sysfs Interface
8 #include <linux/damon.h>
9 #include <linux/kobject.h>
10 #include <linux/pid.h>
11 #include <linux/sched.h>
12 #include <linux/slab.h>
14 static DEFINE_MUTEX(damon_sysfs_lock);
17 * unsigned long range directory
20 struct damon_sysfs_ul_range {
26 static struct damon_sysfs_ul_range *damon_sysfs_ul_range_alloc(
30 struct damon_sysfs_ul_range *range = kmalloc(sizeof(*range),
35 range->kobj = (struct kobject){};
42 static ssize_t min_show(struct kobject *kobj, struct kobj_attribute *attr,
45 struct damon_sysfs_ul_range *range = container_of(kobj,
46 struct damon_sysfs_ul_range, kobj);
48 return sysfs_emit(buf, "%lu\n", range->min);
51 static ssize_t min_store(struct kobject *kobj, struct kobj_attribute *attr,
52 const char *buf, size_t count)
54 struct damon_sysfs_ul_range *range = container_of(kobj,
55 struct damon_sysfs_ul_range, kobj);
59 err = kstrtoul(buf, 0, &min);
67 static ssize_t max_show(struct kobject *kobj, struct kobj_attribute *attr,
70 struct damon_sysfs_ul_range *range = container_of(kobj,
71 struct damon_sysfs_ul_range, kobj);
73 return sysfs_emit(buf, "%lu\n", range->max);
76 static ssize_t max_store(struct kobject *kobj, struct kobj_attribute *attr,
77 const char *buf, size_t count)
79 struct damon_sysfs_ul_range *range = container_of(kobj,
80 struct damon_sysfs_ul_range, kobj);
84 err = kstrtoul(buf, 0, &max);
92 static void damon_sysfs_ul_range_release(struct kobject *kobj)
94 kfree(container_of(kobj, struct damon_sysfs_ul_range, kobj));
97 static struct kobj_attribute damon_sysfs_ul_range_min_attr =
98 __ATTR_RW_MODE(min, 0600);
100 static struct kobj_attribute damon_sysfs_ul_range_max_attr =
101 __ATTR_RW_MODE(max, 0600);
103 static struct attribute *damon_sysfs_ul_range_attrs[] = {
104 &damon_sysfs_ul_range_min_attr.attr,
105 &damon_sysfs_ul_range_max_attr.attr,
108 ATTRIBUTE_GROUPS(damon_sysfs_ul_range);
110 static struct kobj_type damon_sysfs_ul_range_ktype = {
111 .release = damon_sysfs_ul_range_release,
112 .sysfs_ops = &kobj_sysfs_ops,
113 .default_groups = damon_sysfs_ul_range_groups,
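/*
 * The min/max pair above is a generic building block: several directories in
 * this file (e.g. monitoring_attrs/nr_regions and the access_pattern ranges)
 * embed a damon_sysfs_ul_range and expose it as a pair of "min" and "max"
 * files.  A usage sketch, with the concrete parent path assumed:
 *
 *     # echo 10 > .../nr_regions/min
 *     # echo 1000 > .../nr_regions/max
 *     # cat .../nr_regions/min
 *     10
 *
 * The stores only update the in-memory struct; the values are applied to a
 * running DAMON context elsewhere in this file (damon_sysfs_set_attrs()).
 */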
117 * schemes/stats directory
120 struct damon_sysfs_stats {
122 unsigned long nr_tried;
123 unsigned long sz_tried;
124 unsigned long nr_applied;
125 unsigned long sz_applied;
126 unsigned long qt_exceeds;
129 static struct damon_sysfs_stats *damon_sysfs_stats_alloc(void)
131 return kzalloc(sizeof(struct damon_sysfs_stats), GFP_KERNEL);
134 static ssize_t nr_tried_show(struct kobject *kobj, struct kobj_attribute *attr,
137 struct damon_sysfs_stats *stats = container_of(kobj,
138 struct damon_sysfs_stats, kobj);
140 return sysfs_emit(buf, "%lu\n", stats->nr_tried);
143 static ssize_t sz_tried_show(struct kobject *kobj, struct kobj_attribute *attr,
146 struct damon_sysfs_stats *stats = container_of(kobj,
147 struct damon_sysfs_stats, kobj);
149 return sysfs_emit(buf, "%lu\n", stats->sz_tried);
152 static ssize_t nr_applied_show(struct kobject *kobj,
153 struct kobj_attribute *attr, char *buf)
155 struct damon_sysfs_stats *stats = container_of(kobj,
156 struct damon_sysfs_stats, kobj);
158 return sysfs_emit(buf, "%lu\n", stats->nr_applied);
161 static ssize_t sz_applied_show(struct kobject *kobj,
162 struct kobj_attribute *attr, char *buf)
164 struct damon_sysfs_stats *stats = container_of(kobj,
165 struct damon_sysfs_stats, kobj);
167 return sysfs_emit(buf, "%lu\n", stats->sz_applied);
170 static ssize_t qt_exceeds_show(struct kobject *kobj,
171 struct kobj_attribute *attr, char *buf)
173 struct damon_sysfs_stats *stats = container_of(kobj,
174 struct damon_sysfs_stats, kobj);
176 return sysfs_emit(buf, "%lu\n", stats->qt_exceeds);
179 static void damon_sysfs_stats_release(struct kobject *kobj)
181 kfree(container_of(kobj, struct damon_sysfs_stats, kobj));
184 static struct kobj_attribute damon_sysfs_stats_nr_tried_attr =
185 __ATTR_RO_MODE(nr_tried, 0400);
187 static struct kobj_attribute damon_sysfs_stats_sz_tried_attr =
188 __ATTR_RO_MODE(sz_tried, 0400);
190 static struct kobj_attribute damon_sysfs_stats_nr_applied_attr =
191 __ATTR_RO_MODE(nr_applied, 0400);
193 static struct kobj_attribute damon_sysfs_stats_sz_applied_attr =
194 __ATTR_RO_MODE(sz_applied, 0400);
196 static struct kobj_attribute damon_sysfs_stats_qt_exceeds_attr =
197 __ATTR_RO_MODE(qt_exceeds, 0400);
199 static struct attribute *damon_sysfs_stats_attrs[] = {
200 &damon_sysfs_stats_nr_tried_attr.attr,
201 &damon_sysfs_stats_sz_tried_attr.attr,
202 &damon_sysfs_stats_nr_applied_attr.attr,
203 &damon_sysfs_stats_sz_applied_attr.attr,
204 &damon_sysfs_stats_qt_exceeds_attr.attr,
207 ATTRIBUTE_GROUPS(damon_sysfs_stats);
209 static struct kobj_type damon_sysfs_stats_ktype = {
210 .release = damon_sysfs_stats_release,
211 .sysfs_ops = &kobj_sysfs_ops,
212 .default_groups = damon_sysfs_stats_groups,
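/*
 * The stats files are read-only snapshots of a scheme's struct damos_stat.
 * A reading sketch, assuming the conventional admin hierarchy:
 *
 *     # cd /sys/kernel/mm/damon/admin/kdamonds/0/contexts/0/schemes/0/stats
 *     # cat nr_tried sz_tried nr_applied sz_applied qt_exceeds
 *
 * The values are refreshed only when "update_schemes_stats" is written to
 * the owning kdamond's "state" file (see
 * damon_sysfs_update_schemes_stats() below).
 */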
216 * watermarks directory
219 struct damon_sysfs_watermarks {
221 enum damos_wmark_metric metric;
222 unsigned long interval_us;
228 static struct damon_sysfs_watermarks *damon_sysfs_watermarks_alloc(
229 enum damos_wmark_metric metric, unsigned long interval_us,
230 unsigned long high, unsigned long mid, unsigned long low)
232 struct damon_sysfs_watermarks *watermarks = kmalloc(
233 sizeof(*watermarks), GFP_KERNEL);
237 watermarks->kobj = (struct kobject){};
238 watermarks->metric = metric;
239 watermarks->interval_us = interval_us;
240 watermarks->high = high;
241 watermarks->mid = mid;
242 watermarks->low = low;
246 /* Should match with enum damos_wmark_metric */
247 static const char * const damon_sysfs_wmark_metric_strs[] = {
252 static ssize_t metric_show(struct kobject *kobj, struct kobj_attribute *attr,
255 struct damon_sysfs_watermarks *watermarks = container_of(kobj,
256 struct damon_sysfs_watermarks, kobj);
258 return sysfs_emit(buf, "%s\n",
259 damon_sysfs_wmark_metric_strs[watermarks->metric]);
262 static ssize_t metric_store(struct kobject *kobj, struct kobj_attribute *attr,
263 const char *buf, size_t count)
265 struct damon_sysfs_watermarks *watermarks = container_of(kobj,
266 struct damon_sysfs_watermarks, kobj);
267 enum damos_wmark_metric metric;
269 for (metric = 0; metric < NR_DAMOS_WMARK_METRICS; metric++) {
270 if (sysfs_streq(buf, damon_sysfs_wmark_metric_strs[metric])) {
271 watermarks->metric = metric;
278 static ssize_t interval_us_show(struct kobject *kobj,
279 struct kobj_attribute *attr, char *buf)
281 struct damon_sysfs_watermarks *watermarks = container_of(kobj,
282 struct damon_sysfs_watermarks, kobj);
284 return sysfs_emit(buf, "%lu\n", watermarks->interval_us);
287 static ssize_t interval_us_store(struct kobject *kobj,
288 struct kobj_attribute *attr, const char *buf, size_t count)
290 struct damon_sysfs_watermarks *watermarks = container_of(kobj,
291 struct damon_sysfs_watermarks, kobj);
292 int err = kstrtoul(buf, 0, &watermarks->interval_us);
299 static ssize_t high_show(struct kobject *kobj,
300 struct kobj_attribute *attr, char *buf)
302 struct damon_sysfs_watermarks *watermarks = container_of(kobj,
303 struct damon_sysfs_watermarks, kobj);
305 return sysfs_emit(buf, "%lu\n", watermarks->high);
308 static ssize_t high_store(struct kobject *kobj,
309 struct kobj_attribute *attr, const char *buf, size_t count)
311 struct damon_sysfs_watermarks *watermarks = container_of(kobj,
312 struct damon_sysfs_watermarks, kobj);
313 int err = kstrtoul(buf, 0, &watermarks->high);
320 static ssize_t mid_show(struct kobject *kobj,
321 struct kobj_attribute *attr, char *buf)
323 struct damon_sysfs_watermarks *watermarks = container_of(kobj,
324 struct damon_sysfs_watermarks, kobj);
326 return sysfs_emit(buf, "%lu\n", watermarks->mid);
329 static ssize_t mid_store(struct kobject *kobj,
330 struct kobj_attribute *attr, const char *buf, size_t count)
332 struct damon_sysfs_watermarks *watermarks = container_of(kobj,
333 struct damon_sysfs_watermarks, kobj);
334 int err = kstrtoul(buf, 0, &watermarks->mid);
341 static ssize_t low_show(struct kobject *kobj,
342 struct kobj_attribute *attr, char *buf)
344 struct damon_sysfs_watermarks *watermarks = container_of(kobj,
345 struct damon_sysfs_watermarks, kobj);
347 return sysfs_emit(buf, "%lu\n", watermarks->low);
350 static ssize_t low_store(struct kobject *kobj,
351 struct kobj_attribute *attr, const char *buf, size_t count)
353 struct damon_sysfs_watermarks *watermarks = container_of(kobj,
354 struct damon_sysfs_watermarks, kobj);
355 int err = kstrtoul(buf, 0, &watermarks->low);
362 static void damon_sysfs_watermarks_release(struct kobject *kobj)
364 kfree(container_of(kobj, struct damon_sysfs_watermarks, kobj));
367 static struct kobj_attribute damon_sysfs_watermarks_metric_attr =
368 __ATTR_RW_MODE(metric, 0600);
370 static struct kobj_attribute damon_sysfs_watermarks_interval_us_attr =
371 __ATTR_RW_MODE(interval_us, 0600);
373 static struct kobj_attribute damon_sysfs_watermarks_high_attr =
374 __ATTR_RW_MODE(high, 0600);
376 static struct kobj_attribute damon_sysfs_watermarks_mid_attr =
377 __ATTR_RW_MODE(mid, 0600);
379 static struct kobj_attribute damon_sysfs_watermarks_low_attr =
380 __ATTR_RW_MODE(low, 0600);
382 static struct attribute *damon_sysfs_watermarks_attrs[] = {
383 &damon_sysfs_watermarks_metric_attr.attr,
384 &damon_sysfs_watermarks_interval_us_attr.attr,
385 &damon_sysfs_watermarks_high_attr.attr,
386 &damon_sysfs_watermarks_mid_attr.attr,
387 &damon_sysfs_watermarks_low_attr.attr,
390 ATTRIBUTE_GROUPS(damon_sysfs_watermarks);
392 static struct kobj_type damon_sysfs_watermarks_ktype = {
393 .release = damon_sysfs_watermarks_release,
394 .sysfs_ops = &kobj_sysfs_ops,
395 .default_groups = damon_sysfs_watermarks_groups,
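/*
 * Example of configuring a scheme's watermarks, path prefix assumed.  The
 * accepted metric names are the strings of damon_sysfs_wmark_metric_strs[]
 * above; "free_mem_rate" is used here only as an illustrative value:
 *
 *     # cd .../schemes/0/watermarks
 *     # echo free_mem_rate > metric
 *     # echo 5000000 > interval_us
 *     # echo 500 > high; echo 300 > mid; echo 200 > low
 */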
399 * scheme/weights directory
402 struct damon_sysfs_weights {
405 unsigned int nr_accesses;
409 static struct damon_sysfs_weights *damon_sysfs_weights_alloc(unsigned int sz,
410 unsigned int nr_accesses, unsigned int age)
412 struct damon_sysfs_weights *weights = kmalloc(sizeof(*weights),
417 weights->kobj = (struct kobject){};
419 weights->nr_accesses = nr_accesses;
424 static ssize_t sz_permil_show(struct kobject *kobj,
425 struct kobj_attribute *attr, char *buf)
427 struct damon_sysfs_weights *weights = container_of(kobj,
428 struct damon_sysfs_weights, kobj);
430 return sysfs_emit(buf, "%u\n", weights->sz);
433 static ssize_t sz_permil_store(struct kobject *kobj,
434 struct kobj_attribute *attr, const char *buf, size_t count)
436 struct damon_sysfs_weights *weights = container_of(kobj,
437 struct damon_sysfs_weights, kobj);
438 int err = kstrtouint(buf, 0, &weights->sz);
445 static ssize_t nr_accesses_permil_show(struct kobject *kobj,
446 struct kobj_attribute *attr, char *buf)
448 struct damon_sysfs_weights *weights = container_of(kobj,
449 struct damon_sysfs_weights, kobj);
451 return sysfs_emit(buf, "%u\n", weights->nr_accesses);
454 static ssize_t nr_accesses_permil_store(struct kobject *kobj,
455 struct kobj_attribute *attr, const char *buf, size_t count)
457 struct damon_sysfs_weights *weights = container_of(kobj,
458 struct damon_sysfs_weights, kobj);
459 int err = kstrtouint(buf, 0, &weights->nr_accesses);
466 static ssize_t age_permil_show(struct kobject *kobj,
467 struct kobj_attribute *attr, char *buf)
469 struct damon_sysfs_weights *weights = container_of(kobj,
470 struct damon_sysfs_weights, kobj);
472 return sysfs_emit(buf, "%u\n", weights->age);
475 static ssize_t age_permil_store(struct kobject *kobj,
476 struct kobj_attribute *attr, const char *buf, size_t count)
478 struct damon_sysfs_weights *weights = container_of(kobj,
479 struct damon_sysfs_weights, kobj);
480 int err = kstrtouint(buf, 0, &weights->age);
487 static void damon_sysfs_weights_release(struct kobject *kobj)
489 kfree(container_of(kobj, struct damon_sysfs_weights, kobj));
492 static struct kobj_attribute damon_sysfs_weights_sz_attr =
493 __ATTR_RW_MODE(sz_permil, 0600);
495 static struct kobj_attribute damon_sysfs_weights_nr_accesses_attr =
496 __ATTR_RW_MODE(nr_accesses_permil, 0600);
498 static struct kobj_attribute damon_sysfs_weights_age_attr =
499 __ATTR_RW_MODE(age_permil, 0600);
501 static struct attribute *damon_sysfs_weights_attrs[] = {
502 &damon_sysfs_weights_sz_attr.attr,
503 &damon_sysfs_weights_nr_accesses_attr.attr,
504 &damon_sysfs_weights_age_attr.attr,
507 ATTRIBUTE_GROUPS(damon_sysfs_weights);
509 static struct kobj_type damon_sysfs_weights_ktype = {
510 .release = damon_sysfs_weights_release,
511 .sysfs_ops = &kobj_sysfs_ops,
512 .default_groups = damon_sysfs_weights_groups,
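/*
 * The three weight files take per-mil (0-1000) values that prioritize
 * regions by size, access frequency, and age when a quota is in effect; a
 * concrete example follows the quotas directory below.
 */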
519 struct damon_sysfs_quotas {
521 struct damon_sysfs_weights *weights;
524 unsigned long reset_interval_ms;
527 static struct damon_sysfs_quotas *damon_sysfs_quotas_alloc(void)
529 return kzalloc(sizeof(struct damon_sysfs_quotas), GFP_KERNEL);
532 static int damon_sysfs_quotas_add_dirs(struct damon_sysfs_quotas *quotas)
534 struct damon_sysfs_weights *weights;
537 weights = damon_sysfs_weights_alloc(0, 0, 0);
541 err = kobject_init_and_add(&weights->kobj, &damon_sysfs_weights_ktype,
542 "as->kobj, "weights");
544 kobject_put(&weights->kobj);
546 quotas->weights = weights;
550 static void damon_sysfs_quotas_rm_dirs(struct damon_sysfs_quotas *quotas)
552 kobject_put("as->weights->kobj);
555 static ssize_t ms_show(struct kobject *kobj, struct kobj_attribute *attr,
558 struct damon_sysfs_quotas *quotas = container_of(kobj,
559 struct damon_sysfs_quotas, kobj);
561 return sysfs_emit(buf, "%lu\n", quotas->ms);
564 static ssize_t ms_store(struct kobject *kobj, struct kobj_attribute *attr,
565 const char *buf, size_t count)
567 struct damon_sysfs_quotas *quotas = container_of(kobj,
568 struct damon_sysfs_quotas, kobj);
569 int err = kstrtoul(buf, 0, &quotas->ms);
576 static ssize_t bytes_show(struct kobject *kobj, struct kobj_attribute *attr,
579 struct damon_sysfs_quotas *quotas = container_of(kobj,
580 struct damon_sysfs_quotas, kobj);
582 return sysfs_emit(buf, "%lu\n", quotas->sz);
585 static ssize_t bytes_store(struct kobject *kobj,
586 struct kobj_attribute *attr, const char *buf, size_t count)
588 struct damon_sysfs_quotas *quotas = container_of(kobj,
589 struct damon_sysfs_quotas, kobj);
590 int err = kstrtoul(buf, 0, &quotas->sz);
597 static ssize_t reset_interval_ms_show(struct kobject *kobj,
598 struct kobj_attribute *attr, char *buf)
600 struct damon_sysfs_quotas *quotas = container_of(kobj,
601 struct damon_sysfs_quotas, kobj);
603 return sysfs_emit(buf, "%lu\n", quotas->reset_interval_ms);
606 static ssize_t reset_interval_ms_store(struct kobject *kobj,
607 struct kobj_attribute *attr, const char *buf, size_t count)
609 struct damon_sysfs_quotas *quotas = container_of(kobj,
610 struct damon_sysfs_quotas, kobj);
611 int err = kstrtoul(buf, 0, &quotas->reset_interval_ms);
618 static void damon_sysfs_quotas_release(struct kobject *kobj)
620 kfree(container_of(kobj, struct damon_sysfs_quotas, kobj));
623 static struct kobj_attribute damon_sysfs_quotas_ms_attr =
624 __ATTR_RW_MODE(ms, 0600);
626 static struct kobj_attribute damon_sysfs_quotas_sz_attr =
627 __ATTR_RW_MODE(bytes, 0600);
629 static struct kobj_attribute damon_sysfs_quotas_reset_interval_ms_attr =
630 __ATTR_RW_MODE(reset_interval_ms, 0600);
632 static struct attribute *damon_sysfs_quotas_attrs[] = {
633 &damon_sysfs_quotas_ms_attr.attr,
634 &damon_sysfs_quotas_sz_attr.attr,
635 &damon_sysfs_quotas_reset_interval_ms_attr.attr,
638 ATTRIBUTE_GROUPS(damon_sysfs_quotas);
640 static struct kobj_type damon_sysfs_quotas_ktype = {
641 .release = damon_sysfs_quotas_release,
642 .sysfs_ops = &kobj_sysfs_ops,
643 .default_groups = damon_sysfs_quotas_groups,
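/*
 * The quotas directory also nests a "weights" directory (see
 * damon_sysfs_quotas_add_dirs() above).  A configuration sketch, path
 * prefix assumed:
 *
 *     # cd .../schemes/0/quotas
 *     # echo 100 > ms
 *     # echo $((16 * 1024 * 1024)) > bytes
 *     # echo 1000 > reset_interval_ms
 *     # echo 0 > weights/sz_permil
 *     # echo 700 > weights/nr_accesses_permil
 *     # echo 300 > weights/age_permil
 */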
647 * access_pattern directory
650 struct damon_sysfs_access_pattern {
652 struct damon_sysfs_ul_range *sz;
653 struct damon_sysfs_ul_range *nr_accesses;
654 struct damon_sysfs_ul_range *age;
658 struct damon_sysfs_access_pattern *damon_sysfs_access_pattern_alloc(void)
660 struct damon_sysfs_access_pattern *access_pattern =
661 kmalloc(sizeof(*access_pattern), GFP_KERNEL);
665 access_pattern->kobj = (struct kobject){};
666 return access_pattern;
669 static int damon_sysfs_access_pattern_add_range_dir(
670 struct damon_sysfs_access_pattern *access_pattern,
671 struct damon_sysfs_ul_range **range_dir_ptr,
674 struct damon_sysfs_ul_range *range = damon_sysfs_ul_range_alloc(0, 0);
679 err = kobject_init_and_add(&range->kobj, &damon_sysfs_ul_range_ktype,
680 &access_pattern->kobj, name);
682 kobject_put(&range->kobj);
684 *range_dir_ptr = range;
688 static int damon_sysfs_access_pattern_add_dirs(
689 struct damon_sysfs_access_pattern *access_pattern)
693 err = damon_sysfs_access_pattern_add_range_dir(access_pattern,
694 &access_pattern->sz, "sz");
698 err = damon_sysfs_access_pattern_add_range_dir(access_pattern,
699 &access_pattern->nr_accesses, "nr_accesses");
701 goto put_nr_accesses_sz_out;
703 err = damon_sysfs_access_pattern_add_range_dir(access_pattern,
704 &access_pattern->age, "age");
706 goto put_age_nr_accesses_sz_out;
709 put_age_nr_accesses_sz_out:
710 kobject_put(&access_pattern->age->kobj);
711 access_pattern->age = NULL;
712 put_nr_accesses_sz_out:
713 kobject_put(&access_pattern->nr_accesses->kobj);
714 access_pattern->nr_accesses = NULL;
716 kobject_put(&access_pattern->sz->kobj);
717 access_pattern->sz = NULL;
721 static void damon_sysfs_access_pattern_rm_dirs(
722 struct damon_sysfs_access_pattern *access_pattern)
724 kobject_put(&access_pattern->sz->kobj);
725 kobject_put(&access_pattern->nr_accesses->kobj);
726 kobject_put(&access_pattern->age->kobj);
729 static void damon_sysfs_access_pattern_release(struct kobject *kobj)
731 kfree(container_of(kobj, struct damon_sysfs_access_pattern, kobj));
734 static struct attribute *damon_sysfs_access_pattern_attrs[] = {
737 ATTRIBUTE_GROUPS(damon_sysfs_access_pattern);
739 static struct kobj_type damon_sysfs_access_pattern_ktype = {
740 .release = damon_sysfs_access_pattern_release,
741 .sysfs_ops = &kobj_sysfs_ops,
742 .default_groups = damon_sysfs_access_pattern_groups,
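/*
 * access_pattern reuses the ul_range directory three times ("sz",
 * "nr_accesses" and "age", see damon_sysfs_access_pattern_add_dirs()
 * above).  A sketch that limits a scheme to 4K-64K byte regions, path
 * prefix assumed:
 *
 *     # cd .../schemes/0/access_pattern
 *     # echo 4096 > sz/min
 *     # echo 65536 > sz/max
 */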
749 struct damon_sysfs_scheme {
751 enum damos_action action;
752 struct damon_sysfs_access_pattern *access_pattern;
753 struct damon_sysfs_quotas *quotas;
754 struct damon_sysfs_watermarks *watermarks;
755 struct damon_sysfs_stats *stats;
758 /* This should match with enum damos_action */
759 static const char * const damon_sysfs_damos_action_strs[] = {
768 static struct damon_sysfs_scheme *damon_sysfs_scheme_alloc(
769 enum damos_action action)
771 struct damon_sysfs_scheme *scheme = kmalloc(sizeof(*scheme),
776 scheme->kobj = (struct kobject){};
777 scheme->action = action;
781 static int damon_sysfs_scheme_set_access_pattern(
782 struct damon_sysfs_scheme *scheme)
784 struct damon_sysfs_access_pattern *access_pattern;
787 access_pattern = damon_sysfs_access_pattern_alloc();
790 err = kobject_init_and_add(&access_pattern->kobj,
791 &damon_sysfs_access_pattern_ktype, &scheme->kobj,
795 err = damon_sysfs_access_pattern_add_dirs(access_pattern);
798 scheme->access_pattern = access_pattern;
802 kobject_put(&access_pattern->kobj);
806 static int damon_sysfs_scheme_set_quotas(struct damon_sysfs_scheme *scheme)
808 struct damon_sysfs_quotas *quotas = damon_sysfs_quotas_alloc();
813 err = kobject_init_and_add(&quotas->kobj, &damon_sysfs_quotas_ktype,
814 &scheme->kobj, "quotas");
817 err = damon_sysfs_quotas_add_dirs(quotas);
820 scheme->quotas = quotas;
824 kobject_put(&quotas->kobj);
828 static int damon_sysfs_scheme_set_watermarks(struct damon_sysfs_scheme *scheme)
830 struct damon_sysfs_watermarks *watermarks =
831 damon_sysfs_watermarks_alloc(DAMOS_WMARK_NONE, 0, 0, 0, 0);
836 err = kobject_init_and_add(&watermarks->kobj,
837 &damon_sysfs_watermarks_ktype, &scheme->kobj,
840 kobject_put(&watermarks->kobj);
842 scheme->watermarks = watermarks;
846 static int damon_sysfs_scheme_set_stats(struct damon_sysfs_scheme *scheme)
848 struct damon_sysfs_stats *stats = damon_sysfs_stats_alloc();
853 err = kobject_init_and_add(&stats->kobj, &damon_sysfs_stats_ktype,
854 &scheme->kobj, "stats");
856 kobject_put(&stats->kobj);
858 scheme->stats = stats;
862 static int damon_sysfs_scheme_add_dirs(struct damon_sysfs_scheme *scheme)
866 err = damon_sysfs_scheme_set_access_pattern(scheme);
869 err = damon_sysfs_scheme_set_quotas(scheme);
871 goto put_access_pattern_out;
872 err = damon_sysfs_scheme_set_watermarks(scheme);
874 goto put_quotas_access_pattern_out;
875 err = damon_sysfs_scheme_set_stats(scheme);
877 goto put_watermarks_quotas_access_pattern_out;
880 put_watermarks_quotas_access_pattern_out:
881 kobject_put(&scheme->watermarks->kobj);
882 scheme->watermarks = NULL;
883 put_quotas_access_pattern_out:
884 kobject_put(&scheme->quotas->kobj);
885 scheme->quotas = NULL;
886 put_access_pattern_out:
887 kobject_put(&scheme->access_pattern->kobj);
888 scheme->access_pattern = NULL;
892 static void damon_sysfs_scheme_rm_dirs(struct damon_sysfs_scheme *scheme)
894 damon_sysfs_access_pattern_rm_dirs(scheme->access_pattern);
895 kobject_put(&scheme->access_pattern->kobj);
896 damon_sysfs_quotas_rm_dirs(scheme->quotas);
897 kobject_put(&scheme->quotas->kobj);
898 kobject_put(&scheme->watermarks->kobj);
899 kobject_put(&scheme->stats->kobj);
902 static ssize_t action_show(struct kobject *kobj, struct kobj_attribute *attr,
905 struct damon_sysfs_scheme *scheme = container_of(kobj,
906 struct damon_sysfs_scheme, kobj);
908 return sysfs_emit(buf, "%s\n",
909 damon_sysfs_damos_action_strs[scheme->action]);
912 static ssize_t action_store(struct kobject *kobj, struct kobj_attribute *attr,
913 const char *buf, size_t count)
915 struct damon_sysfs_scheme *scheme = container_of(kobj,
916 struct damon_sysfs_scheme, kobj);
917 enum damos_action action;
919 for (action = 0; action < NR_DAMOS_ACTIONS; action++) {
920 if (sysfs_streq(buf, damon_sysfs_damos_action_strs[action])) {
921 scheme->action = action;
928 static void damon_sysfs_scheme_release(struct kobject *kobj)
930 kfree(container_of(kobj, struct damon_sysfs_scheme, kobj));
933 static struct kobj_attribute damon_sysfs_scheme_action_attr =
934 __ATTR_RW_MODE(action, 0600);
936 static struct attribute *damon_sysfs_scheme_attrs[] = {
937 &damon_sysfs_scheme_action_attr.attr,
940 ATTRIBUTE_GROUPS(damon_sysfs_scheme);
942 static struct kobj_type damon_sysfs_scheme_ktype = {
943 .release = damon_sysfs_scheme_release,
944 .sysfs_ops = &kobj_sysfs_ops,
945 .default_groups = damon_sysfs_scheme_groups,
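/*
 * Each numbered scheme directory thus holds an "action" file next to the
 * access_pattern, quotas, watermarks and stats subdirectories built by
 * damon_sysfs_scheme_add_dirs().  Writing one of the strings of
 * damon_sysfs_damos_action_strs[] to "action" selects the corresponding
 * DAMOS action; reading the file shows the currently selected one.
 */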
952 struct damon_sysfs_schemes {
954 struct damon_sysfs_scheme **schemes_arr;
958 static struct damon_sysfs_schemes *damon_sysfs_schemes_alloc(void)
960 return kzalloc(sizeof(struct damon_sysfs_schemes), GFP_KERNEL);
963 static void damon_sysfs_schemes_rm_dirs(struct damon_sysfs_schemes *schemes)
965 struct damon_sysfs_scheme **schemes_arr = schemes->schemes_arr;
968 for (i = 0; i < schemes->nr; i++) {
969 damon_sysfs_scheme_rm_dirs(schemes_arr[i]);
970 kobject_put(&schemes_arr[i]->kobj);
974 schemes->schemes_arr = NULL;
977 static int damon_sysfs_schemes_add_dirs(struct damon_sysfs_schemes *schemes,
980 struct damon_sysfs_scheme **schemes_arr, *scheme;
983 damon_sysfs_schemes_rm_dirs(schemes);
987 schemes_arr = kmalloc_array(nr_schemes, sizeof(*schemes_arr),
988 GFP_KERNEL | __GFP_NOWARN);
991 schemes->schemes_arr = schemes_arr;
993 for (i = 0; i < nr_schemes; i++) {
994 scheme = damon_sysfs_scheme_alloc(DAMOS_STAT);
996 damon_sysfs_schemes_rm_dirs(schemes);
1000 err = kobject_init_and_add(&scheme->kobj,
1001 &damon_sysfs_scheme_ktype, &schemes->kobj,
1005 err = damon_sysfs_scheme_add_dirs(scheme);
1009 schemes_arr[i] = scheme;
1015 damon_sysfs_schemes_rm_dirs(schemes);
1016 kobject_put(&scheme->kobj);
1020 static ssize_t nr_schemes_show(struct kobject *kobj,
1021 struct kobj_attribute *attr, char *buf)
1023 struct damon_sysfs_schemes *schemes = container_of(kobj,
1024 struct damon_sysfs_schemes, kobj);
1026 return sysfs_emit(buf, "%d\n", schemes->nr);
1029 static ssize_t nr_schemes_store(struct kobject *kobj,
1030 struct kobj_attribute *attr, const char *buf, size_t count)
1032 struct damon_sysfs_schemes *schemes = container_of(kobj,
1033 struct damon_sysfs_schemes, kobj);
1034 int nr, err = kstrtoint(buf, 0, &nr);
1041 if (!mutex_trylock(&damon_sysfs_lock))
1043 err = damon_sysfs_schemes_add_dirs(schemes, nr);
1044 mutex_unlock(&damon_sysfs_lock);
1050 static void damon_sysfs_schemes_release(struct kobject *kobj)
1052 kfree(container_of(kobj, struct damon_sysfs_schemes, kobj));
1055 static struct kobj_attribute damon_sysfs_schemes_nr_attr =
1056 __ATTR_RW_MODE(nr_schemes, 0600);
1058 static struct attribute *damon_sysfs_schemes_attrs[] = {
1059 &damon_sysfs_schemes_nr_attr.attr,
1062 ATTRIBUTE_GROUPS(damon_sysfs_schemes);
1064 static struct kobj_type damon_sysfs_schemes_ktype = {
1065 .release = damon_sysfs_schemes_release,
1066 .sysfs_ops = &kobj_sysfs_ops,
1067 .default_groups = damon_sysfs_schemes_groups,
1071 * init region directory
1074 struct damon_sysfs_region {
1075 struct kobject kobj;
1076 unsigned long start;
1080 static struct damon_sysfs_region *damon_sysfs_region_alloc(
1081 unsigned long start,
1084 struct damon_sysfs_region *region = kmalloc(sizeof(*region),
1089 region->kobj = (struct kobject){};
1090 region->start = start;
1095 static ssize_t start_show(struct kobject *kobj, struct kobj_attribute *attr,
1098 struct damon_sysfs_region *region = container_of(kobj,
1099 struct damon_sysfs_region, kobj);
1101 return sysfs_emit(buf, "%lu\n", region->start);
1104 static ssize_t start_store(struct kobject *kobj, struct kobj_attribute *attr,
1105 const char *buf, size_t count)
1107 struct damon_sysfs_region *region = container_of(kobj,
1108 struct damon_sysfs_region, kobj);
1109 int err = kstrtoul(buf, 0, &region->start);
1116 static ssize_t end_show(struct kobject *kobj, struct kobj_attribute *attr,
1119 struct damon_sysfs_region *region = container_of(kobj,
1120 struct damon_sysfs_region, kobj);
1122 return sysfs_emit(buf, "%lu\n", region->end);
1125 static ssize_t end_store(struct kobject *kobj, struct kobj_attribute *attr,
1126 const char *buf, size_t count)
1128 struct damon_sysfs_region *region = container_of(kobj,
1129 struct damon_sysfs_region, kobj);
1130 int err = kstrtoul(buf, 0, &region->end);
1137 static void damon_sysfs_region_release(struct kobject *kobj)
1139 kfree(container_of(kobj, struct damon_sysfs_region, kobj));
1142 static struct kobj_attribute damon_sysfs_region_start_attr =
1143 __ATTR_RW_MODE(start, 0600);
1145 static struct kobj_attribute damon_sysfs_region_end_attr =
1146 __ATTR_RW_MODE(end, 0600);
1148 static struct attribute *damon_sysfs_region_attrs[] = {
1149 &damon_sysfs_region_start_attr.attr,
1150 &damon_sysfs_region_end_attr.attr,
1153 ATTRIBUTE_GROUPS(damon_sysfs_region);
1155 static struct kobj_type damon_sysfs_region_ktype = {
1156 .release = damon_sysfs_region_release,
1157 .sysfs_ops = &kobj_sysfs_ops,
1158 .default_groups = damon_sysfs_region_groups,
1162 * init_regions directory
1165 struct damon_sysfs_regions {
1166 struct kobject kobj;
1167 struct damon_sysfs_region **regions_arr;
1171 static struct damon_sysfs_regions *damon_sysfs_regions_alloc(void)
1173 return kzalloc(sizeof(struct damon_sysfs_regions), GFP_KERNEL);
1176 static void damon_sysfs_regions_rm_dirs(struct damon_sysfs_regions *regions)
1178 struct damon_sysfs_region **regions_arr = regions->regions_arr;
1181 for (i = 0; i < regions->nr; i++)
1182 kobject_put(&regions_arr[i]->kobj);
1185 regions->regions_arr = NULL;
1188 static int damon_sysfs_regions_add_dirs(struct damon_sysfs_regions *regions,
1191 struct damon_sysfs_region **regions_arr, *region;
1194 damon_sysfs_regions_rm_dirs(regions);
1198 regions_arr = kmalloc_array(nr_regions, sizeof(*regions_arr),
1199 GFP_KERNEL | __GFP_NOWARN);
1202 regions->regions_arr = regions_arr;
1204 for (i = 0; i < nr_regions; i++) {
1205 region = damon_sysfs_region_alloc(0, 0);
1207 damon_sysfs_regions_rm_dirs(regions);
1211 err = kobject_init_and_add(&region->kobj,
1212 &damon_sysfs_region_ktype, &regions->kobj,
1215 kobject_put(&region->kobj);
1216 damon_sysfs_regions_rm_dirs(regions);
1220 regions_arr[i] = region;
1226 static ssize_t nr_regions_show(struct kobject *kobj,
1227 struct kobj_attribute *attr, char *buf)
1229 struct damon_sysfs_regions *regions = container_of(kobj,
1230 struct damon_sysfs_regions, kobj);
1232 return sysfs_emit(buf, "%d\n", regions->nr);
1235 static ssize_t nr_regions_store(struct kobject *kobj,
1236 struct kobj_attribute *attr, const char *buf, size_t count)
1238 struct damon_sysfs_regions *regions = container_of(kobj,
1239 struct damon_sysfs_regions, kobj);
1240 int nr, err = kstrtoint(buf, 0, &nr);
1247 if (!mutex_trylock(&damon_sysfs_lock))
1249 err = damon_sysfs_regions_add_dirs(regions, nr);
1250 mutex_unlock(&damon_sysfs_lock);
1257 static void damon_sysfs_regions_release(struct kobject *kobj)
1259 kfree(container_of(kobj, struct damon_sysfs_regions, kobj));
1262 static struct kobj_attribute damon_sysfs_regions_nr_attr =
1263 __ATTR_RW_MODE(nr_regions, 0600);
1265 static struct attribute *damon_sysfs_regions_attrs[] = {
1266 &damon_sysfs_regions_nr_attr.attr,
1269 ATTRIBUTE_GROUPS(damon_sysfs_regions);
1271 static struct kobj_type damon_sysfs_regions_ktype = {
1272 .release = damon_sysfs_regions_release,
1273 .sysfs_ops = &kobj_sysfs_ops,
1274 .default_groups = damon_sysfs_regions_groups,
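/*
 * A sketch of setting one initial monitoring region for the first target,
 * conventional admin hierarchy assumed:
 *
 *     # cd .../contexts/0/targets/0/regions
 *     # echo 1 > nr_regions
 *     # echo 4096 > 0/start
 *     # echo 8192 > 0/end
 *
 * damon_sysfs_set_regions() below rejects a region whose start is larger
 * than its end, or which overlaps the previously added one.
 */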
1281 struct damon_sysfs_target {
1282 struct kobject kobj;
1283 struct damon_sysfs_regions *regions;
1287 static struct damon_sysfs_target *damon_sysfs_target_alloc(void)
1289 return kzalloc(sizeof(struct damon_sysfs_target), GFP_KERNEL);
1292 static int damon_sysfs_target_add_dirs(struct damon_sysfs_target *target)
1294 struct damon_sysfs_regions *regions = damon_sysfs_regions_alloc();
1300 err = kobject_init_and_add(&regions->kobj, &damon_sysfs_regions_ktype,
1301 &target->kobj, "regions");
1303 kobject_put(&regions->kobj);
1305 target->regions = regions;
1309 static void damon_sysfs_target_rm_dirs(struct damon_sysfs_target *target)
1311 damon_sysfs_regions_rm_dirs(target->regions);
1312 kobject_put(&target->regions->kobj);
1315 static ssize_t pid_target_show(struct kobject *kobj,
1316 struct kobj_attribute *attr, char *buf)
1318 struct damon_sysfs_target *target = container_of(kobj,
1319 struct damon_sysfs_target, kobj);
1321 return sysfs_emit(buf, "%d\n", target->pid);
1324 static ssize_t pid_target_store(struct kobject *kobj,
1325 struct kobj_attribute *attr, const char *buf, size_t count)
1327 struct damon_sysfs_target *target = container_of(kobj,
1328 struct damon_sysfs_target, kobj);
1329 int err = kstrtoint(buf, 0, &target->pid);
1336 static void damon_sysfs_target_release(struct kobject *kobj)
1338 kfree(container_of(kobj, struct damon_sysfs_target, kobj));
1341 static struct kobj_attribute damon_sysfs_target_pid_attr =
1342 __ATTR_RW_MODE(pid_target, 0600);
1344 static struct attribute *damon_sysfs_target_attrs[] = {
1345 &damon_sysfs_target_pid_attr.attr,
1348 ATTRIBUTE_GROUPS(damon_sysfs_target);
1350 static struct kobj_type damon_sysfs_target_ktype = {
1351 .release = damon_sysfs_target_release,
1352 .sysfs_ops = &kobj_sysfs_ops,
1353 .default_groups = damon_sysfs_target_groups,
1360 struct damon_sysfs_targets {
1361 struct kobject kobj;
1362 struct damon_sysfs_target **targets_arr;
1366 static struct damon_sysfs_targets *damon_sysfs_targets_alloc(void)
1368 return kzalloc(sizeof(struct damon_sysfs_targets), GFP_KERNEL);
1371 static void damon_sysfs_targets_rm_dirs(struct damon_sysfs_targets *targets)
1373 struct damon_sysfs_target **targets_arr = targets->targets_arr;
1376 for (i = 0; i < targets->nr; i++) {
1377 damon_sysfs_target_rm_dirs(targets_arr[i]);
1378 kobject_put(&targets_arr[i]->kobj);
1382 targets->targets_arr = NULL;
1385 static int damon_sysfs_targets_add_dirs(struct damon_sysfs_targets *targets,
1388 struct damon_sysfs_target **targets_arr, *target;
1391 damon_sysfs_targets_rm_dirs(targets);
1395 targets_arr = kmalloc_array(nr_targets, sizeof(*targets_arr),
1396 GFP_KERNEL | __GFP_NOWARN);
1399 targets->targets_arr = targets_arr;
1401 for (i = 0; i < nr_targets; i++) {
1402 target = damon_sysfs_target_alloc();
1404 damon_sysfs_targets_rm_dirs(targets);
1408 err = kobject_init_and_add(&target->kobj,
1409 &damon_sysfs_target_ktype, &targets->kobj,
1414 err = damon_sysfs_target_add_dirs(target);
1418 targets_arr[i] = target;
1424 damon_sysfs_targets_rm_dirs(targets);
1425 kobject_put(&target->kobj);
1429 static ssize_t nr_targets_show(struct kobject *kobj,
1430 struct kobj_attribute *attr, char *buf)
1432 struct damon_sysfs_targets *targets = container_of(kobj,
1433 struct damon_sysfs_targets, kobj);
1435 return sysfs_emit(buf, "%d\n", targets->nr);
1438 static ssize_t nr_targets_store(struct kobject *kobj,
1439 struct kobj_attribute *attr, const char *buf, size_t count)
1441 struct damon_sysfs_targets *targets = container_of(kobj,
1442 struct damon_sysfs_targets, kobj);
1443 int nr, err = kstrtoint(buf, 0, &nr);
1450 if (!mutex_trylock(&damon_sysfs_lock))
1452 err = damon_sysfs_targets_add_dirs(targets, nr);
1453 mutex_unlock(&damon_sysfs_lock);
1460 static void damon_sysfs_targets_release(struct kobject *kobj)
1462 kfree(container_of(kobj, struct damon_sysfs_targets, kobj));
1465 static struct kobj_attribute damon_sysfs_targets_nr_attr =
1466 __ATTR_RW_MODE(nr_targets, 0600);
1468 static struct attribute *damon_sysfs_targets_attrs[] = {
1469 &damon_sysfs_targets_nr_attr.attr,
1472 ATTRIBUTE_GROUPS(damon_sysfs_targets);
1474 static struct kobj_type damon_sysfs_targets_ktype = {
1475 .release = damon_sysfs_targets_release,
1476 .sysfs_ops = &kobj_sysfs_ops,
1477 .default_groups = damon_sysfs_targets_groups,
1481 * intervals directory
1484 struct damon_sysfs_intervals {
1485 struct kobject kobj;
1486 unsigned long sample_us;
1487 unsigned long aggr_us;
1488 unsigned long update_us;
1491 static struct damon_sysfs_intervals *damon_sysfs_intervals_alloc(
1492 unsigned long sample_us, unsigned long aggr_us,
1493 unsigned long update_us)
1495 struct damon_sysfs_intervals *intervals = kmalloc(sizeof(*intervals),
1501 intervals->kobj = (struct kobject){};
1502 intervals->sample_us = sample_us;
1503 intervals->aggr_us = aggr_us;
1504 intervals->update_us = update_us;
1508 static ssize_t sample_us_show(struct kobject *kobj,
1509 struct kobj_attribute *attr, char *buf)
1511 struct damon_sysfs_intervals *intervals = container_of(kobj,
1512 struct damon_sysfs_intervals, kobj);
1514 return sysfs_emit(buf, "%lu\n", intervals->sample_us);
1517 static ssize_t sample_us_store(struct kobject *kobj,
1518 struct kobj_attribute *attr, const char *buf, size_t count)
1520 struct damon_sysfs_intervals *intervals = container_of(kobj,
1521 struct damon_sysfs_intervals, kobj);
1523 int err = kstrtoul(buf, 0, &us);
1528 intervals->sample_us = us;
1532 static ssize_t aggr_us_show(struct kobject *kobj, struct kobj_attribute *attr,
1535 struct damon_sysfs_intervals *intervals = container_of(kobj,
1536 struct damon_sysfs_intervals, kobj);
1538 return sysfs_emit(buf, "%lu\n", intervals->aggr_us);
1541 static ssize_t aggr_us_store(struct kobject *kobj, struct kobj_attribute *attr,
1542 const char *buf, size_t count)
1544 struct damon_sysfs_intervals *intervals = container_of(kobj,
1545 struct damon_sysfs_intervals, kobj);
1547 int err = kstrtoul(buf, 0, &us);
1552 intervals->aggr_us = us;
1556 static ssize_t update_us_show(struct kobject *kobj,
1557 struct kobj_attribute *attr, char *buf)
1559 struct damon_sysfs_intervals *intervals = container_of(kobj,
1560 struct damon_sysfs_intervals, kobj);
1562 return sysfs_emit(buf, "%lu\n", intervals->update_us);
1565 static ssize_t update_us_store(struct kobject *kobj,
1566 struct kobj_attribute *attr, const char *buf, size_t count)
1568 struct damon_sysfs_intervals *intervals = container_of(kobj,
1569 struct damon_sysfs_intervals, kobj);
1571 int err = kstrtoul(buf, 0, &us);
1576 intervals->update_us = us;
1580 static void damon_sysfs_intervals_release(struct kobject *kobj)
1582 kfree(container_of(kobj, struct damon_sysfs_intervals, kobj));
1585 static struct kobj_attribute damon_sysfs_intervals_sample_us_attr =
1586 __ATTR_RW_MODE(sample_us, 0600);
1588 static struct kobj_attribute damon_sysfs_intervals_aggr_us_attr =
1589 __ATTR_RW_MODE(aggr_us, 0600);
1591 static struct kobj_attribute damon_sysfs_intervals_update_us_attr =
1592 __ATTR_RW_MODE(update_us, 0600);
1594 static struct attribute *damon_sysfs_intervals_attrs[] = {
1595 &damon_sysfs_intervals_sample_us_attr.attr,
1596 &damon_sysfs_intervals_aggr_us_attr.attr,
1597 &damon_sysfs_intervals_update_us_attr.attr,
1600 ATTRIBUTE_GROUPS(damon_sysfs_intervals);
1602 static struct kobj_type damon_sysfs_intervals_ktype = {
1603 .release = damon_sysfs_intervals_release,
1604 .sysfs_ops = &kobj_sysfs_ops,
1605 .default_groups = damon_sysfs_intervals_groups,
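/*
 * Example of tuning the monitoring intervals (all values in microseconds),
 * path prefix and the "intervals" directory name assumed:
 *
 *     # cd .../contexts/0/monitoring_attrs/intervals
 *     # echo 5000 > sample_us
 *     # echo 100000 > aggr_us
 *     # echo 1000000 > update_us
 *
 * The defaults set by damon_sysfs_attrs_add_dirs() below are 5000, 100000
 * and 60000000 respectively.
 */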
1609 * monitoring_attrs directory
1612 struct damon_sysfs_attrs {
1613 struct kobject kobj;
1614 struct damon_sysfs_intervals *intervals;
1615 struct damon_sysfs_ul_range *nr_regions_range;
1618 static struct damon_sysfs_attrs *damon_sysfs_attrs_alloc(void)
1620 struct damon_sysfs_attrs *attrs = kmalloc(sizeof(*attrs), GFP_KERNEL);
1624 attrs->kobj = (struct kobject){};
1628 static int damon_sysfs_attrs_add_dirs(struct damon_sysfs_attrs *attrs)
1630 struct damon_sysfs_intervals *intervals;
1631 struct damon_sysfs_ul_range *nr_regions_range;
1634 intervals = damon_sysfs_intervals_alloc(5000, 100000, 60000000);
1638 err = kobject_init_and_add(&intervals->kobj,
1639 &damon_sysfs_intervals_ktype, &attrs->kobj,
1642 goto put_intervals_out;
1643 attrs->intervals = intervals;
1645 nr_regions_range = damon_sysfs_ul_range_alloc(10, 1000);
1646 if (!nr_regions_range) {
1648 goto put_intervals_out;
1651 err = kobject_init_and_add(&nr_regions_range->kobj,
1652 &damon_sysfs_ul_range_ktype, &attrs->kobj,
1655 goto put_nr_regions_intervals_out;
1656 attrs->nr_regions_range = nr_regions_range;
1659 put_nr_regions_intervals_out:
1660 kobject_put(&nr_regions_range->kobj);
1661 attrs->nr_regions_range = NULL;
1663 kobject_put(&intervals->kobj);
1664 attrs->intervals = NULL;
1668 static void damon_sysfs_attrs_rm_dirs(struct damon_sysfs_attrs *attrs)
1670 kobject_put(&attrs->nr_regions_range->kobj);
1671 kobject_put(&attrs->intervals->kobj);
1674 static void damon_sysfs_attrs_release(struct kobject *kobj)
1676 kfree(container_of(kobj, struct damon_sysfs_attrs, kobj));
1679 static struct attribute *damon_sysfs_attrs_attrs[] = {
1682 ATTRIBUTE_GROUPS(damon_sysfs_attrs);
1684 static struct kobj_type damon_sysfs_attrs_ktype = {
1685 .release = damon_sysfs_attrs_release,
1686 .sysfs_ops = &kobj_sysfs_ops,
1687 .default_groups = damon_sysfs_attrs_groups,
1694 /* This should match with enum damon_ops_id */
1695 static const char * const damon_sysfs_ops_strs[] = {
1700 struct damon_sysfs_context {
1701 struct kobject kobj;
1702 enum damon_ops_id ops_id;
1703 struct damon_sysfs_attrs *attrs;
1704 struct damon_sysfs_targets *targets;
1705 struct damon_sysfs_schemes *schemes;
1708 static struct damon_sysfs_context *damon_sysfs_context_alloc(
1709 enum damon_ops_id ops_id)
1711 struct damon_sysfs_context *context = kmalloc(sizeof(*context),
1716 context->kobj = (struct kobject){};
1717 context->ops_id = ops_id;
1721 static int damon_sysfs_context_set_attrs(struct damon_sysfs_context *context)
1723 struct damon_sysfs_attrs *attrs = damon_sysfs_attrs_alloc();
1728 err = kobject_init_and_add(&attrs->kobj, &damon_sysfs_attrs_ktype,
1729 &context->kobj, "monitoring_attrs");
1732 err = damon_sysfs_attrs_add_dirs(attrs);
1735 context->attrs = attrs;
1739 kobject_put(&attrs->kobj);
1743 static int damon_sysfs_context_set_targets(struct damon_sysfs_context *context)
1745 struct damon_sysfs_targets *targets = damon_sysfs_targets_alloc();
1750 err = kobject_init_and_add(&targets->kobj, &damon_sysfs_targets_ktype,
1751 &context->kobj, "targets");
1753 kobject_put(&targets->kobj);
1756 context->targets = targets;
1760 static int damon_sysfs_context_set_schemes(struct damon_sysfs_context *context)
1762 struct damon_sysfs_schemes *schemes = damon_sysfs_schemes_alloc();
1767 err = kobject_init_and_add(&schemes->kobj, &damon_sysfs_schemes_ktype,
1768 &context->kobj, "schemes");
1770 kobject_put(&schemes->kobj);
1773 context->schemes = schemes;
1777 static int damon_sysfs_context_add_dirs(struct damon_sysfs_context *context)
1781 err = damon_sysfs_context_set_attrs(context);
1785 err = damon_sysfs_context_set_targets(context);
1789 err = damon_sysfs_context_set_schemes(context);
1791 goto put_targets_attrs_out;
1794 put_targets_attrs_out:
1795 kobject_put(&context->targets->kobj);
1796 context->targets = NULL;
1798 kobject_put(&context->attrs->kobj);
1799 context->attrs = NULL;
1803 static void damon_sysfs_context_rm_dirs(struct damon_sysfs_context *context)
1805 damon_sysfs_attrs_rm_dirs(context->attrs);
1806 kobject_put(&context->attrs->kobj);
1807 damon_sysfs_targets_rm_dirs(context->targets);
1808 kobject_put(&context->targets->kobj);
1809 damon_sysfs_schemes_rm_dirs(context->schemes);
1810 kobject_put(&context->schemes->kobj);
1813 static ssize_t operations_show(struct kobject *kobj,
1814 struct kobj_attribute *attr, char *buf)
1816 struct damon_sysfs_context *context = container_of(kobj,
1817 struct damon_sysfs_context, kobj);
1819 return sysfs_emit(buf, "%s\n", damon_sysfs_ops_strs[context->ops_id]);
1822 static ssize_t operations_store(struct kobject *kobj,
1823 struct kobj_attribute *attr, const char *buf, size_t count)
1825 struct damon_sysfs_context *context = container_of(kobj,
1826 struct damon_sysfs_context, kobj);
1827 enum damon_ops_id id;
1829 for (id = 0; id < NR_DAMON_OPS; id++) {
1830 if (sysfs_streq(buf, damon_sysfs_ops_strs[id])) {
1831 context->ops_id = id;
1838 static void damon_sysfs_context_release(struct kobject *kobj)
1840 kfree(container_of(kobj, struct damon_sysfs_context, kobj));
1843 static struct kobj_attribute damon_sysfs_context_operations_attr =
1844 __ATTR_RW_MODE(operations, 0600);
1846 static struct attribute *damon_sysfs_context_attrs[] = {
1847 &damon_sysfs_context_operations_attr.attr,
1850 ATTRIBUTE_GROUPS(damon_sysfs_context);
1852 static struct kobj_type damon_sysfs_context_ktype = {
1853 .release = damon_sysfs_context_release,
1854 .sysfs_ops = &kobj_sysfs_ops,
1855 .default_groups = damon_sysfs_context_groups,
1859 * contexts directory
1862 struct damon_sysfs_contexts {
1863 struct kobject kobj;
1864 struct damon_sysfs_context **contexts_arr;
1868 static struct damon_sysfs_contexts *damon_sysfs_contexts_alloc(void)
1870 return kzalloc(sizeof(struct damon_sysfs_contexts), GFP_KERNEL);
1873 static void damon_sysfs_contexts_rm_dirs(struct damon_sysfs_contexts *contexts)
1875 struct damon_sysfs_context **contexts_arr = contexts->contexts_arr;
1878 for (i = 0; i < contexts->nr; i++) {
1879 damon_sysfs_context_rm_dirs(contexts_arr[i]);
1880 kobject_put(&contexts_arr[i]->kobj);
1883 kfree(contexts_arr);
1884 contexts->contexts_arr = NULL;
1887 static int damon_sysfs_contexts_add_dirs(struct damon_sysfs_contexts *contexts,
1890 struct damon_sysfs_context **contexts_arr, *context;
1893 damon_sysfs_contexts_rm_dirs(contexts);
1897 contexts_arr = kmalloc_array(nr_contexts, sizeof(*contexts_arr),
1898 GFP_KERNEL | __GFP_NOWARN);
1901 contexts->contexts_arr = contexts_arr;
1903 for (i = 0; i < nr_contexts; i++) {
1904 context = damon_sysfs_context_alloc(DAMON_OPS_VADDR);
1906 damon_sysfs_contexts_rm_dirs(contexts);
1910 err = kobject_init_and_add(&context->kobj,
1911 &damon_sysfs_context_ktype, &contexts->kobj,
1916 err = damon_sysfs_context_add_dirs(context);
1920 contexts_arr[i] = context;
1926 damon_sysfs_contexts_rm_dirs(contexts);
1927 kobject_put(&context->kobj);
1931 static ssize_t nr_contexts_show(struct kobject *kobj,
1932 struct kobj_attribute *attr, char *buf)
1934 struct damon_sysfs_contexts *contexts = container_of(kobj,
1935 struct damon_sysfs_contexts, kobj);
1937 return sysfs_emit(buf, "%d\n", contexts->nr);
1940 static ssize_t nr_contexts_store(struct kobject *kobj,
1941 struct kobj_attribute *attr, const char *buf, size_t count)
1943 struct damon_sysfs_contexts *contexts = container_of(kobj,
1944 struct damon_sysfs_contexts, kobj);
1947 err = kstrtoint(buf, 0, &nr);
1950 /* TODO: support multiple contexts per kdamond */
1951 if (nr < 0 || 1 < nr)
1954 if (!mutex_trylock(&damon_sysfs_lock))
1956 err = damon_sysfs_contexts_add_dirs(contexts, nr);
1957 mutex_unlock(&damon_sysfs_lock);
1964 static void damon_sysfs_contexts_release(struct kobject *kobj)
1966 kfree(container_of(kobj, struct damon_sysfs_contexts, kobj));
1969 static struct kobj_attribute damon_sysfs_contexts_nr_attr
1970 = __ATTR_RW_MODE(nr_contexts, 0600);
1972 static struct attribute *damon_sysfs_contexts_attrs[] = {
1973 &damon_sysfs_contexts_nr_attr.attr,
1976 ATTRIBUTE_GROUPS(damon_sysfs_contexts);
1978 static struct kobj_type damon_sysfs_contexts_ktype = {
1979 .release = damon_sysfs_contexts_release,
1980 .sysfs_ops = &kobj_sysfs_ops,
1981 .default_groups = damon_sysfs_contexts_groups,
1988 struct damon_sysfs_kdamond {
1989 struct kobject kobj;
1990 struct damon_sysfs_contexts *contexts;
1991 struct damon_ctx *damon_ctx;
1994 static struct damon_sysfs_kdamond *damon_sysfs_kdamond_alloc(void)
1996 return kzalloc(sizeof(struct damon_sysfs_kdamond), GFP_KERNEL);
1999 static int damon_sysfs_kdamond_add_dirs(struct damon_sysfs_kdamond *kdamond)
2001 struct damon_sysfs_contexts *contexts;
2004 contexts = damon_sysfs_contexts_alloc();
2008 err = kobject_init_and_add(&contexts->kobj,
2009 &damon_sysfs_contexts_ktype, &kdamond->kobj,
2012 kobject_put(&contexts->kobj);
2015 kdamond->contexts = contexts;
2020 static void damon_sysfs_kdamond_rm_dirs(struct damon_sysfs_kdamond *kdamond)
2022 damon_sysfs_contexts_rm_dirs(kdamond->contexts);
2023 kobject_put(&kdamond->contexts->kobj);
2026 static bool damon_sysfs_ctx_running(struct damon_ctx *ctx)
2030 mutex_lock(&ctx->kdamond_lock);
2031 running = ctx->kdamond != NULL;
2032 mutex_unlock(&ctx->kdamond_lock);
2036 static ssize_t state_show(struct kobject *kobj, struct kobj_attribute *attr,
2039 struct damon_sysfs_kdamond *kdamond = container_of(kobj,
2040 struct damon_sysfs_kdamond, kobj);
2041 struct damon_ctx *ctx = kdamond->damon_ctx;
2047 running = damon_sysfs_ctx_running(ctx);
2049 return sysfs_emit(buf, "%s\n", running ? "on" : "off");
2052 static int damon_sysfs_set_attrs(struct damon_ctx *ctx,
2053 struct damon_sysfs_attrs *sys_attrs)
2055 struct damon_sysfs_intervals *sys_intervals = sys_attrs->intervals;
2056 struct damon_sysfs_ul_range *sys_nr_regions =
2057 sys_attrs->nr_regions_range;
2059 return damon_set_attrs(ctx, sys_intervals->sample_us,
2060 sys_intervals->aggr_us, sys_intervals->update_us,
2061 sys_nr_regions->min, sys_nr_regions->max);
2064 static void damon_sysfs_destroy_targets(struct damon_ctx *ctx)
2066 struct damon_target *t, *next;
2068 damon_for_each_target_safe(t, next, ctx) {
2069 if (ctx->ops.id == DAMON_OPS_VADDR)
2071 damon_destroy_target(t);
2075 static int damon_sysfs_set_regions(struct damon_target *t,
2076 struct damon_sysfs_regions *sysfs_regions)
2080 for (i = 0; i < sysfs_regions->nr; i++) {
2081 struct damon_sysfs_region *sys_region =
2082 sysfs_regions->regions_arr[i];
2083 struct damon_region *prev, *r;
2085 if (sys_region->start > sys_region->end)
2087 r = damon_new_region(sys_region->start, sys_region->end);
2090 damon_add_region(r, t);
2091 if (damon_nr_regions(t) > 1) {
2092 prev = damon_prev_region(r);
2093 if (prev->ar.end > r->ar.start) {
2094 damon_destroy_region(r, t);
2102 static int damon_sysfs_set_targets(struct damon_ctx *ctx,
2103 struct damon_sysfs_targets *sysfs_targets)
2107 for (i = 0; i < sysfs_targets->nr; i++) {
2108 struct damon_sysfs_target *sys_target =
2109 sysfs_targets->targets_arr[i];
2110 struct damon_target *t = damon_new_target();
2113 damon_sysfs_destroy_targets(ctx);
2116 if (ctx->ops.id == DAMON_OPS_VADDR) {
2117 t->pid = find_get_pid(sys_target->pid);
2119 damon_sysfs_destroy_targets(ctx);
2123 damon_add_target(ctx, t);
2124 err = damon_sysfs_set_regions(t, sys_target->regions);
2126 damon_sysfs_destroy_targets(ctx);
2133 static struct damos *damon_sysfs_mk_scheme(
2134 struct damon_sysfs_scheme *sysfs_scheme)
2136 struct damon_sysfs_access_pattern *pattern =
2137 sysfs_scheme->access_pattern;
2138 struct damon_sysfs_quotas *sysfs_quotas = sysfs_scheme->quotas;
2139 struct damon_sysfs_weights *sysfs_weights = sysfs_quotas->weights;
2140 struct damon_sysfs_watermarks *sysfs_wmarks = sysfs_scheme->watermarks;
2141 struct damos_quota quota = {
2142 .ms = sysfs_quotas->ms,
2143 .sz = sysfs_quotas->sz,
2144 .reset_interval = sysfs_quotas->reset_interval_ms,
2145 .weight_sz = sysfs_weights->sz,
2146 .weight_nr_accesses = sysfs_weights->nr_accesses,
2147 .weight_age = sysfs_weights->age,
2149 struct damos_watermarks wmarks = {
2150 .metric = sysfs_wmarks->metric,
2151 .interval = sysfs_wmarks->interval_us,
2152 .high = sysfs_wmarks->high,
2153 .mid = sysfs_wmarks->mid,
2154 .low = sysfs_wmarks->low,
2157 return damon_new_scheme(pattern->sz->min, pattern->sz->max,
2158 pattern->nr_accesses->min, pattern->nr_accesses->max,
2159 pattern->age->min, pattern->age->max,
2160 sysfs_scheme->action, &quota, &wmarks);
2163 static int damon_sysfs_set_schemes(struct damon_ctx *ctx,
2164 struct damon_sysfs_schemes *sysfs_schemes)
2168 for (i = 0; i < sysfs_schemes->nr; i++) {
2169 struct damos *scheme, *next;
2171 scheme = damon_sysfs_mk_scheme(sysfs_schemes->schemes_arr[i]);
2173 damon_for_each_scheme_safe(scheme, next, ctx)
2174 damon_destroy_scheme(scheme);
2177 damon_add_scheme(ctx, scheme);
2182 static void damon_sysfs_before_terminate(struct damon_ctx *ctx)
2184 struct damon_target *t, *next;
2186 if (ctx->ops.id != DAMON_OPS_VADDR)
2189 mutex_lock(&ctx->kdamond_lock);
2190 damon_for_each_target_safe(t, next, ctx) {
2192 damon_destroy_target(t);
2194 mutex_unlock(&ctx->kdamond_lock);
2197 static struct damon_ctx *damon_sysfs_build_ctx(
2198 struct damon_sysfs_context *sys_ctx)
2200 struct damon_ctx *ctx = damon_new_ctx();
2204 return ERR_PTR(-ENOMEM);
2206 err = damon_select_ops(ctx, sys_ctx->ops_id);
2209 err = damon_sysfs_set_attrs(ctx, sys_ctx->attrs);
2212 err = damon_sysfs_set_targets(ctx, sys_ctx->targets);
2215 err = damon_sysfs_set_schemes(ctx, sys_ctx->schemes);
2219 ctx->callback.before_terminate = damon_sysfs_before_terminate;
2223 damon_destroy_ctx(ctx);
2224 return ERR_PTR(err);
2227 static int damon_sysfs_turn_damon_on(struct damon_sysfs_kdamond *kdamond)
2229 struct damon_ctx *ctx;
2232 if (kdamond->damon_ctx &&
2233 damon_sysfs_ctx_running(kdamond->damon_ctx))
2235 /* TODO: support multiple contexts per kdamond */
2236 if (kdamond->contexts->nr != 1)
2239 if (kdamond->damon_ctx)
2240 damon_destroy_ctx(kdamond->damon_ctx);
2241 kdamond->damon_ctx = NULL;
2243 ctx = damon_sysfs_build_ctx(kdamond->contexts->contexts_arr[0]);
2245 return PTR_ERR(ctx);
2246 err = damon_start(&ctx, 1, false);
2248 damon_destroy_ctx(ctx);
2251 kdamond->damon_ctx = ctx;
2255 static int damon_sysfs_turn_damon_off(struct damon_sysfs_kdamond *kdamond)
2257 if (!kdamond->damon_ctx)
2259 return damon_stop(&kdamond->damon_ctx, 1);
2261 * To allow users to see the final monitoring results of an already
2262 * turned-off DAMON instance, kdamond->damon_ctx is freed in the next
2263 * damon_sysfs_turn_damon_on() or kdamonds_nr_store() call.
2267 static int damon_sysfs_update_schemes_stats(struct damon_sysfs_kdamond *kdamond)
2269 struct damon_ctx *ctx = kdamond->damon_ctx;
2270 struct damos *scheme;
2271 int schemes_idx = 0;
2275 mutex_lock(&ctx->kdamond_lock);
2276 damon_for_each_scheme(scheme, ctx) {
2277 struct damon_sysfs_schemes *sysfs_schemes;
2278 struct damon_sysfs_stats *sysfs_stats;
2280 sysfs_schemes = kdamond->contexts->contexts_arr[0]->schemes;
2281 sysfs_stats = sysfs_schemes->schemes_arr[schemes_idx++]->stats;
2282 sysfs_stats->nr_tried = scheme->stat.nr_tried;
2283 sysfs_stats->sz_tried = scheme->stat.sz_tried;
2284 sysfs_stats->nr_applied = scheme->stat.nr_applied;
2285 sysfs_stats->sz_applied = scheme->stat.sz_applied;
2286 sysfs_stats->qt_exceeds = scheme->stat.qt_exceeds;
2288 mutex_unlock(&ctx->kdamond_lock);
2292 static ssize_t state_store(struct kobject *kobj, struct kobj_attribute *attr,
2293 const char *buf, size_t count)
2295 struct damon_sysfs_kdamond *kdamond = container_of(kobj,
2296 struct damon_sysfs_kdamond, kobj);
2299 if (!mutex_trylock(&damon_sysfs_lock))
2301 if (sysfs_streq(buf, "on"))
2302 ret = damon_sysfs_turn_damon_on(kdamond);
2303 else if (sysfs_streq(buf, "off"))
2304 ret = damon_sysfs_turn_damon_off(kdamond);
2305 else if (sysfs_streq(buf, "update_schemes_stats"))
2306 ret = damon_sysfs_update_schemes_stats(kdamond);
2309 mutex_unlock(&damon_sysfs_lock);
2315 static ssize_t pid_show(struct kobject *kobj,
2316 struct kobj_attribute *attr, char *buf)
2318 struct damon_sysfs_kdamond *kdamond = container_of(kobj,
2319 struct damon_sysfs_kdamond, kobj);
2320 struct damon_ctx *ctx;
2323 if (!mutex_trylock(&damon_sysfs_lock))
2325 ctx = kdamond->damon_ctx;
2330 mutex_lock(&ctx->kdamond_lock);
2334 pid = ctx->kdamond->pid;
2335 mutex_unlock(&ctx->kdamond_lock);
2337 mutex_unlock(&damon_sysfs_lock);
2338 return sysfs_emit(buf, "%d\n", pid);
2341 static void damon_sysfs_kdamond_release(struct kobject *kobj)
2343 struct damon_sysfs_kdamond *kdamond = container_of(kobj,
2344 struct damon_sysfs_kdamond, kobj);
2346 if (kdamond->damon_ctx)
2347 damon_destroy_ctx(kdamond->damon_ctx);
2351 static struct kobj_attribute damon_sysfs_kdamond_state_attr =
2352 __ATTR_RW_MODE(state, 0600);
2354 static struct kobj_attribute damon_sysfs_kdamond_pid_attr =
2355 __ATTR_RO_MODE(pid, 0400);
2357 static struct attribute *damon_sysfs_kdamond_attrs[] = {
2358 &damon_sysfs_kdamond_state_attr.attr,
2359 &damon_sysfs_kdamond_pid_attr.attr,
2362 ATTRIBUTE_GROUPS(damon_sysfs_kdamond);
2364 static struct kobj_type damon_sysfs_kdamond_ktype = {
2365 .release = damon_sysfs_kdamond_release,
2366 .sysfs_ops = &kobj_sysfs_ops,
2367 .default_groups = damon_sysfs_kdamond_groups,
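/*
 * The per-kdamond "state" file drives the interface: writing "on" builds a
 * struct damon_ctx from the sysfs tree and starts a kdamond thread via
 * damon_start(), "off" stops it via damon_stop(), and "update_schemes_stats"
 * copies the schemes' statistics back into the stats directories.  "pid"
 * shows the PID of the kdamond thread while it is running.  For example:
 *
 *     # echo on > .../kdamonds/0/state
 *     # cat .../kdamonds/0/pid
 *     # echo update_schemes_stats > .../kdamonds/0/state
 *     # echo off > .../kdamonds/0/state
 */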
2371 * kdamonds directory
2374 struct damon_sysfs_kdamonds {
2375 struct kobject kobj;
2376 struct damon_sysfs_kdamond **kdamonds_arr;
2380 static struct damon_sysfs_kdamonds *damon_sysfs_kdamonds_alloc(void)
2382 return kzalloc(sizeof(struct damon_sysfs_kdamonds), GFP_KERNEL);
2385 static void damon_sysfs_kdamonds_rm_dirs(struct damon_sysfs_kdamonds *kdamonds)
2387 struct damon_sysfs_kdamond **kdamonds_arr = kdamonds->kdamonds_arr;
2390 for (i = 0; i < kdamonds->nr; i++) {
2391 damon_sysfs_kdamond_rm_dirs(kdamonds_arr[i]);
2392 kobject_put(&kdamonds_arr[i]->kobj);
2395 kfree(kdamonds_arr);
2396 kdamonds->kdamonds_arr = NULL;
2399 static int damon_sysfs_nr_running_ctxs(struct damon_sysfs_kdamond **kdamonds,
2402 int nr_running_ctxs = 0;
2405 for (i = 0; i < nr_kdamonds; i++) {
2406 struct damon_ctx *ctx = kdamonds[i]->damon_ctx;
2410 mutex_lock(&ctx->kdamond_lock);
2413 mutex_unlock(&ctx->kdamond_lock);
2415 return nr_running_ctxs;
2418 static int damon_sysfs_kdamonds_add_dirs(struct damon_sysfs_kdamonds *kdamonds,
2421 struct damon_sysfs_kdamond **kdamonds_arr, *kdamond;
2424 if (damon_sysfs_nr_running_ctxs(kdamonds->kdamonds_arr, kdamonds->nr))
2427 damon_sysfs_kdamonds_rm_dirs(kdamonds);
2431 kdamonds_arr = kmalloc_array(nr_kdamonds, sizeof(*kdamonds_arr),
2432 GFP_KERNEL | __GFP_NOWARN);
2435 kdamonds->kdamonds_arr = kdamonds_arr;
2437 for (i = 0; i < nr_kdamonds; i++) {
2438 kdamond = damon_sysfs_kdamond_alloc();
2440 damon_sysfs_kdamonds_rm_dirs(kdamonds);
2444 err = kobject_init_and_add(&kdamond->kobj,
2445 &damon_sysfs_kdamond_ktype, &kdamonds->kobj,
2450 err = damon_sysfs_kdamond_add_dirs(kdamond);
2454 kdamonds_arr[i] = kdamond;
2460 damon_sysfs_kdamonds_rm_dirs(kdamonds);
2461 kobject_put(&kdamond->kobj);
2465 static ssize_t nr_kdamonds_show(struct kobject *kobj,
2466 struct kobj_attribute *attr, char *buf)
2468 struct damon_sysfs_kdamonds *kdamonds = container_of(kobj,
2469 struct damon_sysfs_kdamonds, kobj);
2471 return sysfs_emit(buf, "%d\n", kdamonds->nr);
2474 static ssize_t nr_kdamonds_store(struct kobject *kobj,
2475 struct kobj_attribute *attr, const char *buf, size_t count)
2477 struct damon_sysfs_kdamonds *kdamonds = container_of(kobj,
2478 struct damon_sysfs_kdamonds, kobj);
2481 err = kstrtoint(buf, 0, &nr);
2487 if (!mutex_trylock(&damon_sysfs_lock))
2489 err = damon_sysfs_kdamonds_add_dirs(kdamonds, nr);
2490 mutex_unlock(&damon_sysfs_lock);
2497 static void damon_sysfs_kdamonds_release(struct kobject *kobj)
2499 kfree(container_of(kobj, struct damon_sysfs_kdamonds, kobj));
2502 static struct kobj_attribute damon_sysfs_kdamonds_nr_attr =
2503 __ATTR_RW_MODE(nr_kdamonds, 0600);
2505 static struct attribute *damon_sysfs_kdamonds_attrs[] = {
2506 &damon_sysfs_kdamonds_nr_attr.attr,
2509 ATTRIBUTE_GROUPS(damon_sysfs_kdamonds);
2511 static struct kobj_type damon_sysfs_kdamonds_ktype = {
2512 .release = damon_sysfs_kdamonds_release,
2513 .sysfs_ops = &kobj_sysfs_ops,
2514 .default_groups = damon_sysfs_kdamonds_groups,
2518 * damon user interface directory
2521 struct damon_sysfs_ui_dir {
2522 struct kobject kobj;
2523 struct damon_sysfs_kdamonds *kdamonds;
2526 static struct damon_sysfs_ui_dir *damon_sysfs_ui_dir_alloc(void)
2528 return kzalloc(sizeof(struct damon_sysfs_ui_dir), GFP_KERNEL);
2531 static int damon_sysfs_ui_dir_add_dirs(struct damon_sysfs_ui_dir *ui_dir)
2533 struct damon_sysfs_kdamonds *kdamonds;
2536 kdamonds = damon_sysfs_kdamonds_alloc();
2540 err = kobject_init_and_add(&kdamonds->kobj,
2541 &damon_sysfs_kdamonds_ktype, &ui_dir->kobj,
2544 kobject_put(&kdamonds->kobj);
2547 ui_dir->kdamonds = kdamonds;
2551 static void damon_sysfs_ui_dir_release(struct kobject *kobj)
2553 kfree(container_of(kobj, struct damon_sysfs_ui_dir, kobj));
2556 static struct attribute *damon_sysfs_ui_dir_attrs[] = {
2559 ATTRIBUTE_GROUPS(damon_sysfs_ui_dir);
2561 static struct kobj_type damon_sysfs_ui_dir_ktype = {
2562 .release = damon_sysfs_ui_dir_release,
2563 .sysfs_ops = &kobj_sysfs_ops,
2564 .default_groups = damon_sysfs_ui_dir_groups,
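/*
 * damon_sysfs_init() below creates the root of the hierarchy.  With mm_kobj
 * living at /sys/kernel/mm/, the resulting tree starts at
 * /sys/kernel/mm/damon/admin/ and looks roughly like (numbered directories
 * are created on demand via the nr_* files):
 *
 *     kdamonds/nr_kdamonds
 *     kdamonds/0/{state,pid}
 *     kdamonds/0/contexts/nr_contexts
 *     kdamonds/0/contexts/0/{operations,monitoring_attrs,targets,schemes}
 */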
2567 static int __init damon_sysfs_init(void)
2569 struct kobject *damon_sysfs_root;
2570 struct damon_sysfs_ui_dir *admin;
2573 damon_sysfs_root = kobject_create_and_add("damon", mm_kobj);
2574 if (!damon_sysfs_root)
2577 admin = damon_sysfs_ui_dir_alloc();
2579 kobject_put(damon_sysfs_root);
2582 err = kobject_init_and_add(&admin->kobj, &damon_sysfs_ui_dir_ktype,
2583 damon_sysfs_root, "admin");
2586 err = damon_sysfs_ui_dir_add_dirs(admin);
2592 kobject_put(&admin->kobj);
2593 kobject_put(damon_sysfs_root);
2596 subsys_initcall(damon_sysfs_init);