1 // SPDX-License-Identifier: GPL-2.0
3 * DAMON sysfs Interface
8 #include <linux/damon.h>
9 #include <linux/kobject.h>
10 #include <linux/pid.h>
11 #include <linux/sched.h>
12 #include <linux/slab.h>
14 static DEFINE_MUTEX(damon_sysfs_lock);
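/*
 * Protects the DAMON sysfs interface internals (the directory arrays and the
 * pending command request declared below) against concurrent access.
 */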
17 * unsigned long range directory
20 struct damon_sysfs_ul_range {
26 static struct damon_sysfs_ul_range *damon_sysfs_ul_range_alloc(
30 struct damon_sysfs_ul_range *range = kmalloc(sizeof(*range),
35 range->kobj = (struct kobject){};
42 static ssize_t min_show(struct kobject *kobj, struct kobj_attribute *attr,
45 struct damon_sysfs_ul_range *range = container_of(kobj,
46 struct damon_sysfs_ul_range, kobj);
48 return sysfs_emit(buf, "%lu\n", range->min);
51 static ssize_t min_store(struct kobject *kobj, struct kobj_attribute *attr,
52 const char *buf, size_t count)
54 struct damon_sysfs_ul_range *range = container_of(kobj,
55 struct damon_sysfs_ul_range, kobj);
59 err = kstrtoul(buf, 0, &min);
67 static ssize_t max_show(struct kobject *kobj, struct kobj_attribute *attr,
70 struct damon_sysfs_ul_range *range = container_of(kobj,
71 struct damon_sysfs_ul_range, kobj);
73 return sysfs_emit(buf, "%lu\n", range->max);
76 static ssize_t max_store(struct kobject *kobj, struct kobj_attribute *attr,
77 const char *buf, size_t count)
79 struct damon_sysfs_ul_range *range = container_of(kobj,
80 struct damon_sysfs_ul_range, kobj);
84 err = kstrtoul(buf, 0, &max);
92 static void damon_sysfs_ul_range_release(struct kobject *kobj)
94 kfree(container_of(kobj, struct damon_sysfs_ul_range, kobj));
97 static struct kobj_attribute damon_sysfs_ul_range_min_attr =
98 __ATTR_RW_MODE(min, 0600);
100 static struct kobj_attribute damon_sysfs_ul_range_max_attr =
101 __ATTR_RW_MODE(max, 0600);
103 static struct attribute *damon_sysfs_ul_range_attrs[] = {
104 &damon_sysfs_ul_range_min_attr.attr,
105 &damon_sysfs_ul_range_max_attr.attr,
108 ATTRIBUTE_GROUPS(damon_sysfs_ul_range);
110 static struct kobj_type damon_sysfs_ul_range_ktype = {
111 .release = damon_sysfs_ul_range_release,
112 .sysfs_ops = &kobj_sysfs_ops,
113 .default_groups = damon_sysfs_ul_range_groups,
117 * schemes/stats directory
120 struct damon_sysfs_stats {
122 unsigned long nr_tried;
123 unsigned long sz_tried;
124 unsigned long nr_applied;
125 unsigned long sz_applied;
126 unsigned long qt_exceeds;
129 static struct damon_sysfs_stats *damon_sysfs_stats_alloc(void)
131 return kzalloc(sizeof(struct damon_sysfs_stats), GFP_KERNEL);
134 static ssize_t nr_tried_show(struct kobject *kobj, struct kobj_attribute *attr,
137 struct damon_sysfs_stats *stats = container_of(kobj,
138 struct damon_sysfs_stats, kobj);
140 return sysfs_emit(buf, "%lu\n", stats->nr_tried);
143 static ssize_t sz_tried_show(struct kobject *kobj, struct kobj_attribute *attr,
146 struct damon_sysfs_stats *stats = container_of(kobj,
147 struct damon_sysfs_stats, kobj);
149 return sysfs_emit(buf, "%lu\n", stats->sz_tried);
152 static ssize_t nr_applied_show(struct kobject *kobj,
153 struct kobj_attribute *attr, char *buf)
155 struct damon_sysfs_stats *stats = container_of(kobj,
156 struct damon_sysfs_stats, kobj);
158 return sysfs_emit(buf, "%lu\n", stats->nr_applied);
161 static ssize_t sz_applied_show(struct kobject *kobj,
162 struct kobj_attribute *attr, char *buf)
164 struct damon_sysfs_stats *stats = container_of(kobj,
165 struct damon_sysfs_stats, kobj);
167 return sysfs_emit(buf, "%lu\n", stats->sz_applied);
170 static ssize_t qt_exceeds_show(struct kobject *kobj,
171 struct kobj_attribute *attr, char *buf)
173 struct damon_sysfs_stats *stats = container_of(kobj,
174 struct damon_sysfs_stats, kobj);
176 return sysfs_emit(buf, "%lu\n", stats->qt_exceeds);
179 static void damon_sysfs_stats_release(struct kobject *kobj)
181 kfree(container_of(kobj, struct damon_sysfs_stats, kobj));
184 static struct kobj_attribute damon_sysfs_stats_nr_tried_attr =
185 __ATTR_RO_MODE(nr_tried, 0400);
187 static struct kobj_attribute damon_sysfs_stats_sz_tried_attr =
188 __ATTR_RO_MODE(sz_tried, 0400);
190 static struct kobj_attribute damon_sysfs_stats_nr_applied_attr =
191 __ATTR_RO_MODE(nr_applied, 0400);
193 static struct kobj_attribute damon_sysfs_stats_sz_applied_attr =
194 __ATTR_RO_MODE(sz_applied, 0400);
196 static struct kobj_attribute damon_sysfs_stats_qt_exceeds_attr =
197 __ATTR_RO_MODE(qt_exceeds, 0400);
199 static struct attribute *damon_sysfs_stats_attrs[] = {
200 &damon_sysfs_stats_nr_tried_attr.attr,
201 &damon_sysfs_stats_sz_tried_attr.attr,
202 &damon_sysfs_stats_nr_applied_attr.attr,
203 &damon_sysfs_stats_sz_applied_attr.attr,
204 &damon_sysfs_stats_qt_exceeds_attr.attr,
207 ATTRIBUTE_GROUPS(damon_sysfs_stats);
209 static struct kobj_type damon_sysfs_stats_ktype = {
210 .release = damon_sysfs_stats_release,
211 .sysfs_ops = &kobj_sysfs_ops,
212 .default_groups = damon_sysfs_stats_groups,
216 * watermarks directory
219 struct damon_sysfs_watermarks {
221 enum damos_wmark_metric metric;
222 unsigned long interval_us;
228 static struct damon_sysfs_watermarks *damon_sysfs_watermarks_alloc(
229 enum damos_wmark_metric metric, unsigned long interval_us,
230 unsigned long high, unsigned long mid, unsigned long low)
232 struct damon_sysfs_watermarks *watermarks = kmalloc(
233 sizeof(*watermarks), GFP_KERNEL);
237 watermarks->kobj = (struct kobject){};
238 watermarks->metric = metric;
239 watermarks->interval_us = interval_us;
240 watermarks->high = high;
241 watermarks->mid = mid;
242 watermarks->low = low;
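/*
 * A scheme's watermarks: 'metric' selects what to measure, 'interval_us' is
 * how often to check it, and 'high', 'mid', and 'low' are the thresholds
 * DAMON uses to automatically activate and deactivate the scheme.
 */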
246 /* Should match with enum damos_wmark_metric */
247 static const char * const damon_sysfs_wmark_metric_strs[] = {
252 static ssize_t metric_show(struct kobject *kobj, struct kobj_attribute *attr,
255 struct damon_sysfs_watermarks *watermarks = container_of(kobj,
256 struct damon_sysfs_watermarks, kobj);
258 return sysfs_emit(buf, "%s\n",
259 damon_sysfs_wmark_metric_strs[watermarks->metric]);
262 static ssize_t metric_store(struct kobject *kobj, struct kobj_attribute *attr,
263 const char *buf, size_t count)
265 struct damon_sysfs_watermarks *watermarks = container_of(kobj,
266 struct damon_sysfs_watermarks, kobj);
267 enum damos_wmark_metric metric;
269 for (metric = 0; metric < NR_DAMOS_WMARK_METRICS; metric++) {
270 if (sysfs_streq(buf, damon_sysfs_wmark_metric_strs[metric])) {
271 watermarks->metric = metric;
278 static ssize_t interval_us_show(struct kobject *kobj,
279 struct kobj_attribute *attr, char *buf)
281 struct damon_sysfs_watermarks *watermarks = container_of(kobj,
282 struct damon_sysfs_watermarks, kobj);
284 return sysfs_emit(buf, "%lu\n", watermarks->interval_us);
287 static ssize_t interval_us_store(struct kobject *kobj,
288 struct kobj_attribute *attr, const char *buf, size_t count)
290 struct damon_sysfs_watermarks *watermarks = container_of(kobj,
291 struct damon_sysfs_watermarks, kobj);
292 int err = kstrtoul(buf, 0, &watermarks->interval_us);
299 static ssize_t high_show(struct kobject *kobj,
300 struct kobj_attribute *attr, char *buf)
302 struct damon_sysfs_watermarks *watermarks = container_of(kobj,
303 struct damon_sysfs_watermarks, kobj);
305 return sysfs_emit(buf, "%lu\n", watermarks->high);
308 static ssize_t high_store(struct kobject *kobj,
309 struct kobj_attribute *attr, const char *buf, size_t count)
311 struct damon_sysfs_watermarks *watermarks = container_of(kobj,
312 struct damon_sysfs_watermarks, kobj);
313 int err = kstrtoul(buf, 0, &watermarks->high);
320 static ssize_t mid_show(struct kobject *kobj,
321 struct kobj_attribute *attr, char *buf)
323 struct damon_sysfs_watermarks *watermarks = container_of(kobj,
324 struct damon_sysfs_watermarks, kobj);
326 return sysfs_emit(buf, "%lu\n", watermarks->mid);
329 static ssize_t mid_store(struct kobject *kobj,
330 struct kobj_attribute *attr, const char *buf, size_t count)
332 struct damon_sysfs_watermarks *watermarks = container_of(kobj,
333 struct damon_sysfs_watermarks, kobj);
334 int err = kstrtoul(buf, 0, &watermarks->mid);
341 static ssize_t low_show(struct kobject *kobj,
342 struct kobj_attribute *attr, char *buf)
344 struct damon_sysfs_watermarks *watermarks = container_of(kobj,
345 struct damon_sysfs_watermarks, kobj);
347 return sysfs_emit(buf, "%lu\n", watermarks->low);
350 static ssize_t low_store(struct kobject *kobj,
351 struct kobj_attribute *attr, const char *buf, size_t count)
353 struct damon_sysfs_watermarks *watermarks = container_of(kobj,
354 struct damon_sysfs_watermarks, kobj);
355 int err = kstrtoul(buf, 0, &watermarks->low);
362 static void damon_sysfs_watermarks_release(struct kobject *kobj)
364 kfree(container_of(kobj, struct damon_sysfs_watermarks, kobj));
367 static struct kobj_attribute damon_sysfs_watermarks_metric_attr =
368 __ATTR_RW_MODE(metric, 0600);
370 static struct kobj_attribute damon_sysfs_watermarks_interval_us_attr =
371 __ATTR_RW_MODE(interval_us, 0600);
373 static struct kobj_attribute damon_sysfs_watermarks_high_attr =
374 __ATTR_RW_MODE(high, 0600);
376 static struct kobj_attribute damon_sysfs_watermarks_mid_attr =
377 __ATTR_RW_MODE(mid, 0600);
379 static struct kobj_attribute damon_sysfs_watermarks_low_attr =
380 __ATTR_RW_MODE(low, 0600);
382 static struct attribute *damon_sysfs_watermarks_attrs[] = {
383 &damon_sysfs_watermarks_metric_attr.attr,
384 &damon_sysfs_watermarks_interval_us_attr.attr,
385 &damon_sysfs_watermarks_high_attr.attr,
386 &damon_sysfs_watermarks_mid_attr.attr,
387 &damon_sysfs_watermarks_low_attr.attr,
390 ATTRIBUTE_GROUPS(damon_sysfs_watermarks);
392 static struct kobj_type damon_sysfs_watermarks_ktype = {
393 .release = damon_sysfs_watermarks_release,
394 .sysfs_ops = &kobj_sysfs_ops,
395 .default_groups = damon_sysfs_watermarks_groups,
399 * scheme/weights directory
402 struct damon_sysfs_weights {
405 unsigned int nr_accesses;
409 static struct damon_sysfs_weights *damon_sysfs_weights_alloc(unsigned int sz,
410 unsigned int nr_accesses, unsigned int age)
412 struct damon_sysfs_weights *weights = kmalloc(sizeof(*weights),
417 weights->kobj = (struct kobject){};
419 weights->nr_accesses = nr_accesses;
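/*
 * The weights are per-mil (0 to 1000) values that prioritize regions by
 * size, access frequency, and age when a scheme cannot apply its action to
 * every matching region within its quotas.
 */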
424 static ssize_t sz_permil_show(struct kobject *kobj,
425 struct kobj_attribute *attr, char *buf)
427 struct damon_sysfs_weights *weights = container_of(kobj,
428 struct damon_sysfs_weights, kobj);
430 return sysfs_emit(buf, "%u\n", weights->sz);
433 static ssize_t sz_permil_store(struct kobject *kobj,
434 struct kobj_attribute *attr, const char *buf, size_t count)
436 struct damon_sysfs_weights *weights = container_of(kobj,
437 struct damon_sysfs_weights, kobj);
438 int err = kstrtouint(buf, 0, &weights->sz);
445 static ssize_t nr_accesses_permil_show(struct kobject *kobj,
446 struct kobj_attribute *attr, char *buf)
448 struct damon_sysfs_weights *weights = container_of(kobj,
449 struct damon_sysfs_weights, kobj);
451 return sysfs_emit(buf, "%u\n", weights->nr_accesses);
454 static ssize_t nr_accesses_permil_store(struct kobject *kobj,
455 struct kobj_attribute *attr, const char *buf, size_t count)
457 struct damon_sysfs_weights *weights = container_of(kobj,
458 struct damon_sysfs_weights, kobj);
459 int err = kstrtouint(buf, 0, &weights->nr_accesses);
466 static ssize_t age_permil_show(struct kobject *kobj,
467 struct kobj_attribute *attr, char *buf)
469 struct damon_sysfs_weights *weights = container_of(kobj,
470 struct damon_sysfs_weights, kobj);
472 return sysfs_emit(buf, "%u\n", weights->age);
475 static ssize_t age_permil_store(struct kobject *kobj,
476 struct kobj_attribute *attr, const char *buf, size_t count)
478 struct damon_sysfs_weights *weights = container_of(kobj,
479 struct damon_sysfs_weights, kobj);
480 int err = kstrtouint(buf, 0, &weights->age);
487 static void damon_sysfs_weights_release(struct kobject *kobj)
489 kfree(container_of(kobj, struct damon_sysfs_weights, kobj));
492 static struct kobj_attribute damon_sysfs_weights_sz_attr =
493 __ATTR_RW_MODE(sz_permil, 0600);
495 static struct kobj_attribute damon_sysfs_weights_nr_accesses_attr =
496 __ATTR_RW_MODE(nr_accesses_permil, 0600);
498 static struct kobj_attribute damon_sysfs_weights_age_attr =
499 __ATTR_RW_MODE(age_permil, 0600);
501 static struct attribute *damon_sysfs_weights_attrs[] = {
502 &damon_sysfs_weights_sz_attr.attr,
503 &damon_sysfs_weights_nr_accesses_attr.attr,
504 &damon_sysfs_weights_age_attr.attr,
507 ATTRIBUTE_GROUPS(damon_sysfs_weights);
509 static struct kobj_type damon_sysfs_weights_ktype = {
510 .release = damon_sysfs_weights_release,
511 .sysfs_ops = &kobj_sysfs_ops,
512 .default_groups = damon_sysfs_weights_groups,
519 struct damon_sysfs_quotas {
521 struct damon_sysfs_weights *weights;
524 unsigned long reset_interval_ms;
527 static struct damon_sysfs_quotas *damon_sysfs_quotas_alloc(void)
529 return kzalloc(sizeof(struct damon_sysfs_quotas), GFP_KERNEL);
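/* Create the 'weights' subdirectory under this quotas directory. */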
532 static int damon_sysfs_quotas_add_dirs(struct damon_sysfs_quotas *quotas)
534 struct damon_sysfs_weights *weights;
537 weights = damon_sysfs_weights_alloc(0, 0, 0);
541 err = kobject_init_and_add(&weights->kobj, &damon_sysfs_weights_ktype,
542 &quotas->kobj, "weights");
544 kobject_put(&weights->kobj);
546 quotas->weights = weights;
550 static void damon_sysfs_quotas_rm_dirs(struct damon_sysfs_quotas *quotas)
552 kobject_put(&quotas->weights->kobj);
555 static ssize_t ms_show(struct kobject *kobj, struct kobj_attribute *attr,
558 struct damon_sysfs_quotas *quotas = container_of(kobj,
559 struct damon_sysfs_quotas, kobj);
561 return sysfs_emit(buf, "%lu\n", quotas->ms);
564 static ssize_t ms_store(struct kobject *kobj, struct kobj_attribute *attr,
565 const char *buf, size_t count)
567 struct damon_sysfs_quotas *quotas = container_of(kobj,
568 struct damon_sysfs_quotas, kobj);
569 int err = kstrtoul(buf, 0, &quotas->ms);
576 static ssize_t bytes_show(struct kobject *kobj, struct kobj_attribute *attr,
579 struct damon_sysfs_quotas *quotas = container_of(kobj,
580 struct damon_sysfs_quotas, kobj);
582 return sysfs_emit(buf, "%lu\n", quotas->sz);
585 static ssize_t bytes_store(struct kobject *kobj,
586 struct kobj_attribute *attr, const char *buf, size_t count)
588 struct damon_sysfs_quotas *quotas = container_of(kobj,
589 struct damon_sysfs_quotas, kobj);
590 int err = kstrtoul(buf, 0, &quotas->sz);
597 static ssize_t reset_interval_ms_show(struct kobject *kobj,
598 struct kobj_attribute *attr, char *buf)
600 struct damon_sysfs_quotas *quotas = container_of(kobj,
601 struct damon_sysfs_quotas, kobj);
603 return sysfs_emit(buf, "%lu\n", quotas->reset_interval_ms);
606 static ssize_t reset_interval_ms_store(struct kobject *kobj,
607 struct kobj_attribute *attr, const char *buf, size_t count)
609 struct damon_sysfs_quotas *quotas = container_of(kobj,
610 struct damon_sysfs_quotas, kobj);
611 int err = kstrtoul(buf, 0, &quotas->reset_interval_ms);
618 static void damon_sysfs_quotas_release(struct kobject *kobj)
620 kfree(container_of(kobj, struct damon_sysfs_quotas, kobj));
623 static struct kobj_attribute damon_sysfs_quotas_ms_attr =
624 __ATTR_RW_MODE(ms, 0600);
626 static struct kobj_attribute damon_sysfs_quotas_sz_attr =
627 __ATTR_RW_MODE(bytes, 0600);
629 static struct kobj_attribute damon_sysfs_quotas_reset_interval_ms_attr =
630 __ATTR_RW_MODE(reset_interval_ms, 0600);
632 static struct attribute *damon_sysfs_quotas_attrs[] = {
633 &damon_sysfs_quotas_ms_attr.attr,
634 &damon_sysfs_quotas_sz_attr.attr,
635 &damon_sysfs_quotas_reset_interval_ms_attr.attr,
638 ATTRIBUTE_GROUPS(damon_sysfs_quotas);
640 static struct kobj_type damon_sysfs_quotas_ktype = {
641 .release = damon_sysfs_quotas_release,
642 .sysfs_ops = &kobj_sysfs_ops,
643 .default_groups = damon_sysfs_quotas_groups,
647 * access_pattern directory
650 struct damon_sysfs_access_pattern {
652 struct damon_sysfs_ul_range *sz;
653 struct damon_sysfs_ul_range *nr_accesses;
654 struct damon_sysfs_ul_range *age;
658 struct damon_sysfs_access_pattern *damon_sysfs_access_pattern_alloc(void)
660 struct damon_sysfs_access_pattern *access_pattern =
661 kmalloc(sizeof(*access_pattern), GFP_KERNEL);
665 access_pattern->kobj = (struct kobject){};
666 return access_pattern;
669 static int damon_sysfs_access_pattern_add_range_dir(
670 struct damon_sysfs_access_pattern *access_pattern,
671 struct damon_sysfs_ul_range **range_dir_ptr,
674 struct damon_sysfs_ul_range *range = damon_sysfs_ul_range_alloc(0, 0);
679 err = kobject_init_and_add(&range->kobj, &damon_sysfs_ul_range_ktype,
680 &access_pattern->kobj, name);
682 kobject_put(&range->kobj);
684 *range_dir_ptr = range;
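/*
 * Create the 'sz', 'nr_accesses', and 'age' range subdirectories, each
 * holding a min/max pair, under the access_pattern directory.
 */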
688 static int damon_sysfs_access_pattern_add_dirs(
689 struct damon_sysfs_access_pattern *access_pattern)
693 err = damon_sysfs_access_pattern_add_range_dir(access_pattern,
694 &access_pattern->sz, "sz");
698 err = damon_sysfs_access_pattern_add_range_dir(access_pattern,
699 &access_pattern->nr_accesses, "nr_accesses");
701 goto put_nr_accesses_sz_out;
703 err = damon_sysfs_access_pattern_add_range_dir(access_pattern,
704 &access_pattern->age, "age");
706 goto put_age_nr_accesses_sz_out;
709 put_age_nr_accesses_sz_out:
710 kobject_put(&access_pattern->age->kobj);
711 access_pattern->age = NULL;
712 put_nr_accesses_sz_out:
713 kobject_put(&access_pattern->nr_accesses->kobj);
714 access_pattern->nr_accesses = NULL;
716 kobject_put(&access_pattern->sz->kobj);
717 access_pattern->sz = NULL;
721 static void damon_sysfs_access_pattern_rm_dirs(
722 struct damon_sysfs_access_pattern *access_pattern)
724 kobject_put(&access_pattern->sz->kobj);
725 kobject_put(&access_pattern->nr_accesses->kobj);
726 kobject_put(&access_pattern->age->kobj);
729 static void damon_sysfs_access_pattern_release(struct kobject *kobj)
731 kfree(container_of(kobj, struct damon_sysfs_access_pattern, kobj));
734 static struct attribute *damon_sysfs_access_pattern_attrs[] = {
737 ATTRIBUTE_GROUPS(damon_sysfs_access_pattern);
739 static struct kobj_type damon_sysfs_access_pattern_ktype = {
740 .release = damon_sysfs_access_pattern_release,
741 .sysfs_ops = &kobj_sysfs_ops,
742 .default_groups = damon_sysfs_access_pattern_groups,
749 struct damon_sysfs_scheme {
751 enum damos_action action;
752 struct damon_sysfs_access_pattern *access_pattern;
753 struct damon_sysfs_quotas *quotas;
754 struct damon_sysfs_watermarks *watermarks;
755 struct damon_sysfs_stats *stats;
758 /* This should match with enum damos_action */
759 static const char * const damon_sysfs_damos_action_strs[] = {
768 static struct damon_sysfs_scheme *damon_sysfs_scheme_alloc(
769 enum damos_action action)
771 struct damon_sysfs_scheme *scheme = kmalloc(sizeof(*scheme),
776 scheme->kobj = (struct kobject){};
777 scheme->action = action;
781 static int damon_sysfs_scheme_set_access_pattern(
782 struct damon_sysfs_scheme *scheme)
784 struct damon_sysfs_access_pattern *access_pattern;
787 access_pattern = damon_sysfs_access_pattern_alloc();
790 err = kobject_init_and_add(&access_pattern->kobj,
791 &damon_sysfs_access_pattern_ktype, &scheme->kobj,
795 err = damon_sysfs_access_pattern_add_dirs(access_pattern);
798 scheme->access_pattern = access_pattern;
802 kobject_put(&access_pattern->kobj);
806 static int damon_sysfs_scheme_set_quotas(struct damon_sysfs_scheme *scheme)
808 struct damon_sysfs_quotas *quotas = damon_sysfs_quotas_alloc();
813 err = kobject_init_and_add(&quotas->kobj, &damon_sysfs_quotas_ktype,
814 &scheme->kobj, "quotas");
817 err = damon_sysfs_quotas_add_dirs(quotas);
820 scheme->quotas = quotas;
824 kobject_put(&quotas->kobj);
828 static int damon_sysfs_scheme_set_watermarks(struct damon_sysfs_scheme *scheme)
830 struct damon_sysfs_watermarks *watermarks =
831 damon_sysfs_watermarks_alloc(DAMOS_WMARK_NONE, 0, 0, 0, 0);
836 err = kobject_init_and_add(&watermarks->kobj,
837 &damon_sysfs_watermarks_ktype, &scheme->kobj,
840 kobject_put(&watermarks->kobj);
842 scheme->watermarks = watermarks;
846 static int damon_sysfs_scheme_set_stats(struct damon_sysfs_scheme *scheme)
848 struct damon_sysfs_stats *stats = damon_sysfs_stats_alloc();
853 err = kobject_init_and_add(&stats->kobj, &damon_sysfs_stats_ktype,
854 &scheme->kobj, "stats");
856 kobject_put(&stats->kobj);
858 scheme->stats = stats;
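/*
 * Populate a scheme directory with its 'access_pattern', 'quotas',
 * 'watermarks', and 'stats' subdirectories, rolling back on failure.
 */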
862 static int damon_sysfs_scheme_add_dirs(struct damon_sysfs_scheme *scheme)
866 err = damon_sysfs_scheme_set_access_pattern(scheme);
869 err = damon_sysfs_scheme_set_quotas(scheme);
871 goto put_access_pattern_out;
872 err = damon_sysfs_scheme_set_watermarks(scheme);
874 goto put_quotas_access_pattern_out;
875 err = damon_sysfs_scheme_set_stats(scheme);
877 goto put_watermarks_quotas_access_pattern_out;
880 put_watermarks_quotas_access_pattern_out:
881 kobject_put(&scheme->watermarks->kobj);
882 scheme->watermarks = NULL;
883 put_quotas_access_pattern_out:
884 kobject_put(&scheme->quotas->kobj);
885 scheme->quotas = NULL;
886 put_access_pattern_out:
887 kobject_put(&scheme->access_pattern->kobj);
888 scheme->access_pattern = NULL;
892 static void damon_sysfs_scheme_rm_dirs(struct damon_sysfs_scheme *scheme)
894 damon_sysfs_access_pattern_rm_dirs(scheme->access_pattern);
895 kobject_put(&scheme->access_pattern->kobj);
896 damon_sysfs_quotas_rm_dirs(scheme->quotas);
897 kobject_put(&scheme->quotas->kobj);
898 kobject_put(&scheme->watermarks->kobj);
899 kobject_put(&scheme->stats->kobj);
902 static ssize_t action_show(struct kobject *kobj, struct kobj_attribute *attr,
905 struct damon_sysfs_scheme *scheme = container_of(kobj,
906 struct damon_sysfs_scheme, kobj);
908 return sysfs_emit(buf, "%s\n",
909 damon_sysfs_damos_action_strs[scheme->action]);
912 static ssize_t action_store(struct kobject *kobj, struct kobj_attribute *attr,
913 const char *buf, size_t count)
915 struct damon_sysfs_scheme *scheme = container_of(kobj,
916 struct damon_sysfs_scheme, kobj);
917 enum damos_action action;
919 for (action = 0; action < NR_DAMOS_ACTIONS; action++) {
920 if (sysfs_streq(buf, damon_sysfs_damos_action_strs[action])) {
921 scheme->action = action;
928 static void damon_sysfs_scheme_release(struct kobject *kobj)
930 kfree(container_of(kobj, struct damon_sysfs_scheme, kobj));
933 static struct kobj_attribute damon_sysfs_scheme_action_attr =
934 __ATTR_RW_MODE(action, 0600);
936 static struct attribute *damon_sysfs_scheme_attrs[] = {
937 &damon_sysfs_scheme_action_attr.attr,
940 ATTRIBUTE_GROUPS(damon_sysfs_scheme);
942 static struct kobj_type damon_sysfs_scheme_ktype = {
943 .release = damon_sysfs_scheme_release,
944 .sysfs_ops = &kobj_sysfs_ops,
945 .default_groups = damon_sysfs_scheme_groups,
952 struct damon_sysfs_schemes {
954 struct damon_sysfs_scheme **schemes_arr;
958 static struct damon_sysfs_schemes *damon_sysfs_schemes_alloc(void)
960 return kzalloc(sizeof(struct damon_sysfs_schemes), GFP_KERNEL);
963 static void damon_sysfs_schemes_rm_dirs(struct damon_sysfs_schemes *schemes)
965 struct damon_sysfs_scheme **schemes_arr = schemes->schemes_arr;
968 for (i = 0; i < schemes->nr; i++) {
969 damon_sysfs_scheme_rm_dirs(schemes_arr[i]);
970 kobject_put(&schemes_arr[i]->kobj);
974 schemes->schemes_arr = NULL;
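/*
 * Recreate 'nr_schemes' scheme directories: existing ones are removed first,
 * and each new scheme starts with the DAMOS_STAT action.
 */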
977 static int damon_sysfs_schemes_add_dirs(struct damon_sysfs_schemes *schemes,
980 struct damon_sysfs_scheme **schemes_arr, *scheme;
983 damon_sysfs_schemes_rm_dirs(schemes);
987 schemes_arr = kmalloc_array(nr_schemes, sizeof(*schemes_arr),
988 GFP_KERNEL | __GFP_NOWARN);
991 schemes->schemes_arr = schemes_arr;
993 for (i = 0; i < nr_schemes; i++) {
994 scheme = damon_sysfs_scheme_alloc(DAMOS_STAT);
996 damon_sysfs_schemes_rm_dirs(schemes);
1000 err = kobject_init_and_add(&scheme->kobj,
1001 &damon_sysfs_scheme_ktype, &schemes->kobj,
1005 err = damon_sysfs_scheme_add_dirs(scheme);
1009 schemes_arr[i] = scheme;
1015 damon_sysfs_schemes_rm_dirs(schemes);
1016 kobject_put(&scheme->kobj);
1020 static ssize_t nr_schemes_show(struct kobject *kobj,
1021 struct kobj_attribute *attr, char *buf)
1023 struct damon_sysfs_schemes *schemes = container_of(kobj,
1024 struct damon_sysfs_schemes, kobj);
1026 return sysfs_emit(buf, "%d\n", schemes->nr);
1029 static ssize_t nr_schemes_store(struct kobject *kobj,
1030 struct kobj_attribute *attr, const char *buf, size_t count)
1032 struct damon_sysfs_schemes *schemes = container_of(kobj,
1033 struct damon_sysfs_schemes, kobj);
1034 int nr, err = kstrtoint(buf, 0, &nr);
1041 if (!mutex_trylock(&damon_sysfs_lock))
1043 err = damon_sysfs_schemes_add_dirs(schemes, nr);
1044 mutex_unlock(&damon_sysfs_lock);
1050 static void damon_sysfs_schemes_release(struct kobject *kobj)
1052 kfree(container_of(kobj, struct damon_sysfs_schemes, kobj));
1055 static struct kobj_attribute damon_sysfs_schemes_nr_attr =
1056 __ATTR_RW_MODE(nr_schemes, 0600);
1058 static struct attribute *damon_sysfs_schemes_attrs[] = {
1059 &damon_sysfs_schemes_nr_attr.attr,
1062 ATTRIBUTE_GROUPS(damon_sysfs_schemes);
1064 static struct kobj_type damon_sysfs_schemes_ktype = {
1065 .release = damon_sysfs_schemes_release,
1066 .sysfs_ops = &kobj_sysfs_ops,
1067 .default_groups = damon_sysfs_schemes_groups,
1071 * init region directory
1074 struct damon_sysfs_region {
1075 struct kobject kobj;
1076 unsigned long start;
1080 static struct damon_sysfs_region *damon_sysfs_region_alloc(
1081 unsigned long start,
1084 struct damon_sysfs_region *region = kmalloc(sizeof(*region),
1089 region->kobj = (struct kobject){};
1090 region->start = start;
1095 static ssize_t start_show(struct kobject *kobj, struct kobj_attribute *attr,
1098 struct damon_sysfs_region *region = container_of(kobj,
1099 struct damon_sysfs_region, kobj);
1101 return sysfs_emit(buf, "%lu\n", region->start);
1104 static ssize_t start_store(struct kobject *kobj, struct kobj_attribute *attr,
1105 const char *buf, size_t count)
1107 struct damon_sysfs_region *region = container_of(kobj,
1108 struct damon_sysfs_region, kobj);
1109 int err = kstrtoul(buf, 0, &region->start);
1116 static ssize_t end_show(struct kobject *kobj, struct kobj_attribute *attr,
1119 struct damon_sysfs_region *region = container_of(kobj,
1120 struct damon_sysfs_region, kobj);
1122 return sysfs_emit(buf, "%lu\n", region->end);
1125 static ssize_t end_store(struct kobject *kobj, struct kobj_attribute *attr,
1126 const char *buf, size_t count)
1128 struct damon_sysfs_region *region = container_of(kobj,
1129 struct damon_sysfs_region, kobj);
1130 int err = kstrtoul(buf, 0, &region->end);
1137 static void damon_sysfs_region_release(struct kobject *kobj)
1139 kfree(container_of(kobj, struct damon_sysfs_region, kobj));
1142 static struct kobj_attribute damon_sysfs_region_start_attr =
1143 __ATTR_RW_MODE(start, 0600);
1145 static struct kobj_attribute damon_sysfs_region_end_attr =
1146 __ATTR_RW_MODE(end, 0600);
1148 static struct attribute *damon_sysfs_region_attrs[] = {
1149 &damon_sysfs_region_start_attr.attr,
1150 &damon_sysfs_region_end_attr.attr,
1153 ATTRIBUTE_GROUPS(damon_sysfs_region);
1155 static struct kobj_type damon_sysfs_region_ktype = {
1156 .release = damon_sysfs_region_release,
1157 .sysfs_ops = &kobj_sysfs_ops,
1158 .default_groups = damon_sysfs_region_groups,
1162 * init_regions directory
1165 struct damon_sysfs_regions {
1166 struct kobject kobj;
1167 struct damon_sysfs_region **regions_arr;
1171 static struct damon_sysfs_regions *damon_sysfs_regions_alloc(void)
1173 return kzalloc(sizeof(struct damon_sysfs_regions), GFP_KERNEL);
1176 static void damon_sysfs_regions_rm_dirs(struct damon_sysfs_regions *regions)
1178 struct damon_sysfs_region **regions_arr = regions->regions_arr;
1181 for (i = 0; i < regions->nr; i++)
1182 kobject_put(&regions_arr[i]->kobj);
1185 regions->regions_arr = NULL;
1188 static int damon_sysfs_regions_add_dirs(struct damon_sysfs_regions *regions,
1191 struct damon_sysfs_region **regions_arr, *region;
1194 damon_sysfs_regions_rm_dirs(regions);
1198 regions_arr = kmalloc_array(nr_regions, sizeof(*regions_arr),
1199 GFP_KERNEL | __GFP_NOWARN);
1202 regions->regions_arr = regions_arr;
1204 for (i = 0; i < nr_regions; i++) {
1205 region = damon_sysfs_region_alloc(0, 0);
1207 damon_sysfs_regions_rm_dirs(regions);
1211 err = kobject_init_and_add(&region->kobj,
1212 &damon_sysfs_region_ktype, &regions->kobj,
1215 kobject_put(&region->kobj);
1216 damon_sysfs_regions_rm_dirs(regions);
1220 regions_arr[i] = region;
1226 static ssize_t nr_regions_show(struct kobject *kobj,
1227 struct kobj_attribute *attr, char *buf)
1229 struct damon_sysfs_regions *regions = container_of(kobj,
1230 struct damon_sysfs_regions, kobj);
1232 return sysfs_emit(buf, "%d\n", regions->nr);
1235 static ssize_t nr_regions_store(struct kobject *kobj,
1236 struct kobj_attribute *attr, const char *buf, size_t count)
1238 struct damon_sysfs_regions *regions = container_of(kobj,
1239 struct damon_sysfs_regions, kobj);
1240 int nr, err = kstrtoint(buf, 0, &nr);
1247 if (!mutex_trylock(&damon_sysfs_lock))
1249 err = damon_sysfs_regions_add_dirs(regions, nr);
1250 mutex_unlock(&damon_sysfs_lock);
1257 static void damon_sysfs_regions_release(struct kobject *kobj)
1259 kfree(container_of(kobj, struct damon_sysfs_regions, kobj));
1262 static struct kobj_attribute damon_sysfs_regions_nr_attr =
1263 __ATTR_RW_MODE(nr_regions, 0600);
1265 static struct attribute *damon_sysfs_regions_attrs[] = {
1266 &damon_sysfs_regions_nr_attr.attr,
1269 ATTRIBUTE_GROUPS(damon_sysfs_regions);
1271 static struct kobj_type damon_sysfs_regions_ktype = {
1272 .release = damon_sysfs_regions_release,
1273 .sysfs_ops = &kobj_sysfs_ops,
1274 .default_groups = damon_sysfs_regions_groups,
1281 struct damon_sysfs_target {
1282 struct kobject kobj;
1283 struct damon_sysfs_regions *regions;
1287 static struct damon_sysfs_target *damon_sysfs_target_alloc(void)
1289 return kzalloc(sizeof(struct damon_sysfs_target), GFP_KERNEL);
1292 static int damon_sysfs_target_add_dirs(struct damon_sysfs_target *target)
1294 struct damon_sysfs_regions *regions = damon_sysfs_regions_alloc();
1300 err = kobject_init_and_add(&regions->kobj, &damon_sysfs_regions_ktype,
1301 &target->kobj, "regions");
1303 kobject_put(&regions->kobj);
1305 target->regions = regions;
1309 static void damon_sysfs_target_rm_dirs(struct damon_sysfs_target *target)
1311 damon_sysfs_regions_rm_dirs(target->regions);
1312 kobject_put(&target->regions->kobj);
1315 static ssize_t pid_target_show(struct kobject *kobj,
1316 struct kobj_attribute *attr, char *buf)
1318 struct damon_sysfs_target *target = container_of(kobj,
1319 struct damon_sysfs_target, kobj);
1321 return sysfs_emit(buf, "%d\n", target->pid);
1324 static ssize_t pid_target_store(struct kobject *kobj,
1325 struct kobj_attribute *attr, const char *buf, size_t count)
1327 struct damon_sysfs_target *target = container_of(kobj,
1328 struct damon_sysfs_target, kobj);
1329 int err = kstrtoint(buf, 0, &target->pid);
1336 static void damon_sysfs_target_release(struct kobject *kobj)
1338 kfree(container_of(kobj, struct damon_sysfs_target, kobj));
1341 static struct kobj_attribute damon_sysfs_target_pid_attr =
1342 __ATTR_RW_MODE(pid_target, 0600);
1344 static struct attribute *damon_sysfs_target_attrs[] = {
1345 &damon_sysfs_target_pid_attr.attr,
1348 ATTRIBUTE_GROUPS(damon_sysfs_target);
1350 static struct kobj_type damon_sysfs_target_ktype = {
1351 .release = damon_sysfs_target_release,
1352 .sysfs_ops = &kobj_sysfs_ops,
1353 .default_groups = damon_sysfs_target_groups,
1360 struct damon_sysfs_targets {
1361 struct kobject kobj;
1362 struct damon_sysfs_target **targets_arr;
1366 static struct damon_sysfs_targets *damon_sysfs_targets_alloc(void)
1368 return kzalloc(sizeof(struct damon_sysfs_targets), GFP_KERNEL);
1371 static void damon_sysfs_targets_rm_dirs(struct damon_sysfs_targets *targets)
1373 struct damon_sysfs_target **targets_arr = targets->targets_arr;
1376 for (i = 0; i < targets->nr; i++) {
1377 damon_sysfs_target_rm_dirs(targets_arr[i]);
1378 kobject_put(&targets_arr[i]->kobj);
1382 targets->targets_arr = NULL;
1385 static int damon_sysfs_targets_add_dirs(struct damon_sysfs_targets *targets,
1388 struct damon_sysfs_target **targets_arr, *target;
1391 damon_sysfs_targets_rm_dirs(targets);
1395 targets_arr = kmalloc_array(nr_targets, sizeof(*targets_arr),
1396 GFP_KERNEL | __GFP_NOWARN);
1399 targets->targets_arr = targets_arr;
1401 for (i = 0; i < nr_targets; i++) {
1402 target = damon_sysfs_target_alloc();
1404 damon_sysfs_targets_rm_dirs(targets);
1408 err = kobject_init_and_add(&target->kobj,
1409 &damon_sysfs_target_ktype, &targets->kobj,
1414 err = damon_sysfs_target_add_dirs(target);
1418 targets_arr[i] = target;
1424 damon_sysfs_targets_rm_dirs(targets);
1425 kobject_put(&target->kobj);
1429 static ssize_t nr_targets_show(struct kobject *kobj,
1430 struct kobj_attribute *attr, char *buf)
1432 struct damon_sysfs_targets *targets = container_of(kobj,
1433 struct damon_sysfs_targets, kobj);
1435 return sysfs_emit(buf, "%d\n", targets->nr);
1438 static ssize_t nr_targets_store(struct kobject *kobj,
1439 struct kobj_attribute *attr, const char *buf, size_t count)
1441 struct damon_sysfs_targets *targets = container_of(kobj,
1442 struct damon_sysfs_targets, kobj);
1443 int nr, err = kstrtoint(buf, 0, &nr);
1450 if (!mutex_trylock(&damon_sysfs_lock))
1452 err = damon_sysfs_targets_add_dirs(targets, nr);
1453 mutex_unlock(&damon_sysfs_lock);
1460 static void damon_sysfs_targets_release(struct kobject *kobj)
1462 kfree(container_of(kobj, struct damon_sysfs_targets, kobj));
1465 static struct kobj_attribute damon_sysfs_targets_nr_attr =
1466 __ATTR_RW_MODE(nr_targets, 0600);
1468 static struct attribute *damon_sysfs_targets_attrs[] = {
1469 &damon_sysfs_targets_nr_attr.attr,
1472 ATTRIBUTE_GROUPS(damon_sysfs_targets);
1474 static struct kobj_type damon_sysfs_targets_ktype = {
1475 .release = damon_sysfs_targets_release,
1476 .sysfs_ops = &kobj_sysfs_ops,
1477 .default_groups = damon_sysfs_targets_groups,
1481 * intervals directory
1484 struct damon_sysfs_intervals {
1485 struct kobject kobj;
1486 unsigned long sample_us;
1487 unsigned long aggr_us;
1488 unsigned long update_us;
1491 static struct damon_sysfs_intervals *damon_sysfs_intervals_alloc(
1492 unsigned long sample_us, unsigned long aggr_us,
1493 unsigned long update_us)
1495 struct damon_sysfs_intervals *intervals = kmalloc(sizeof(*intervals),
1501 intervals->kobj = (struct kobject){};
1502 intervals->sample_us = sample_us;
1503 intervals->aggr_us = aggr_us;
1504 intervals->update_us = update_us;
1508 static ssize_t sample_us_show(struct kobject *kobj,
1509 struct kobj_attribute *attr, char *buf)
1511 struct damon_sysfs_intervals *intervals = container_of(kobj,
1512 struct damon_sysfs_intervals, kobj);
1514 return sysfs_emit(buf, "%lu\n", intervals->sample_us);
1517 static ssize_t sample_us_store(struct kobject *kobj,
1518 struct kobj_attribute *attr, const char *buf, size_t count)
1520 struct damon_sysfs_intervals *intervals = container_of(kobj,
1521 struct damon_sysfs_intervals, kobj);
1523 int err = kstrtoul(buf, 0, &us);
1528 intervals->sample_us = us;
1532 static ssize_t aggr_us_show(struct kobject *kobj, struct kobj_attribute *attr,
1535 struct damon_sysfs_intervals *intervals = container_of(kobj,
1536 struct damon_sysfs_intervals, kobj);
1538 return sysfs_emit(buf, "%lu\n", intervals->aggr_us);
1541 static ssize_t aggr_us_store(struct kobject *kobj, struct kobj_attribute *attr,
1542 const char *buf, size_t count)
1544 struct damon_sysfs_intervals *intervals = container_of(kobj,
1545 struct damon_sysfs_intervals, kobj);
1547 int err = kstrtoul(buf, 0, &us);
1552 intervals->aggr_us = us;
1556 static ssize_t update_us_show(struct kobject *kobj,
1557 struct kobj_attribute *attr, char *buf)
1559 struct damon_sysfs_intervals *intervals = container_of(kobj,
1560 struct damon_sysfs_intervals, kobj);
1562 return sysfs_emit(buf, "%lu\n", intervals->update_us);
1565 static ssize_t update_us_store(struct kobject *kobj,
1566 struct kobj_attribute *attr, const char *buf, size_t count)
1568 struct damon_sysfs_intervals *intervals = container_of(kobj,
1569 struct damon_sysfs_intervals, kobj);
1571 int err = kstrtoul(buf, 0, &us);
1576 intervals->update_us = us;
1580 static void damon_sysfs_intervals_release(struct kobject *kobj)
1582 kfree(container_of(kobj, struct damon_sysfs_intervals, kobj));
1585 static struct kobj_attribute damon_sysfs_intervals_sample_us_attr =
1586 __ATTR_RW_MODE(sample_us, 0600);
1588 static struct kobj_attribute damon_sysfs_intervals_aggr_us_attr =
1589 __ATTR_RW_MODE(aggr_us, 0600);
1591 static struct kobj_attribute damon_sysfs_intervals_update_us_attr =
1592 __ATTR_RW_MODE(update_us, 0600);
1594 static struct attribute *damon_sysfs_intervals_attrs[] = {
1595 &damon_sysfs_intervals_sample_us_attr.attr,
1596 &damon_sysfs_intervals_aggr_us_attr.attr,
1597 &damon_sysfs_intervals_update_us_attr.attr,
1600 ATTRIBUTE_GROUPS(damon_sysfs_intervals);
1602 static struct kobj_type damon_sysfs_intervals_ktype = {
1603 .release = damon_sysfs_intervals_release,
1604 .sysfs_ops = &kobj_sysfs_ops,
1605 .default_groups = damon_sysfs_intervals_groups,
1609 * monitoring_attrs directory
1612 struct damon_sysfs_attrs {
1613 struct kobject kobj;
1614 struct damon_sysfs_intervals *intervals;
1615 struct damon_sysfs_ul_range *nr_regions_range;
1618 static struct damon_sysfs_attrs *damon_sysfs_attrs_alloc(void)
1620 struct damon_sysfs_attrs *attrs = kmalloc(sizeof(*attrs), GFP_KERNEL);
1624 attrs->kobj = (struct kobject){};
1628 static int damon_sysfs_attrs_add_dirs(struct damon_sysfs_attrs *attrs)
1630 struct damon_sysfs_intervals *intervals;
1631 struct damon_sysfs_ul_range *nr_regions_range;
1634 intervals = damon_sysfs_intervals_alloc(5000, 100000, 60000000);
1638 err = kobject_init_and_add(&intervals->kobj,
1639 &damon_sysfs_intervals_ktype, &attrs->kobj,
1642 goto put_intervals_out;
1643 attrs->intervals = intervals;
1645 nr_regions_range = damon_sysfs_ul_range_alloc(10, 1000);
1646 if (!nr_regions_range) {
1648 goto put_intervals_out;
1651 err = kobject_init_and_add(&nr_regions_range->kobj,
1652 &damon_sysfs_ul_range_ktype, &attrs->kobj,
1655 goto put_nr_regions_intervals_out;
1656 attrs->nr_regions_range = nr_regions_range;
1659 put_nr_regions_intervals_out:
1660 kobject_put(&nr_regions_range->kobj);
1661 attrs->nr_regions_range = NULL;
1663 kobject_put(&intervals->kobj);
1664 attrs->intervals = NULL;
1668 static void damon_sysfs_attrs_rm_dirs(struct damon_sysfs_attrs *attrs)
1670 kobject_put(&attrs->nr_regions_range->kobj);
1671 kobject_put(&attrs->intervals->kobj);
1674 static void damon_sysfs_attrs_release(struct kobject *kobj)
1676 kfree(container_of(kobj, struct damon_sysfs_attrs, kobj));
1679 static struct attribute *damon_sysfs_attrs_attrs[] = {
1682 ATTRIBUTE_GROUPS(damon_sysfs_attrs);
1684 static struct kobj_type damon_sysfs_attrs_ktype = {
1685 .release = damon_sysfs_attrs_release,
1686 .sysfs_ops = &kobj_sysfs_ops,
1687 .default_groups = damon_sysfs_attrs_groups,
1694 /* This should match with enum damon_ops_id */
1695 static const char * const damon_sysfs_ops_strs[] = {
1701 struct damon_sysfs_context {
1702 struct kobject kobj;
1703 enum damon_ops_id ops_id;
1704 struct damon_sysfs_attrs *attrs;
1705 struct damon_sysfs_targets *targets;
1706 struct damon_sysfs_schemes *schemes;
1709 static struct damon_sysfs_context *damon_sysfs_context_alloc(
1710 enum damon_ops_id ops_id)
1712 struct damon_sysfs_context *context = kmalloc(sizeof(*context),
1717 context->kobj = (struct kobject){};
1718 context->ops_id = ops_id;
1722 static int damon_sysfs_context_set_attrs(struct damon_sysfs_context *context)
1724 struct damon_sysfs_attrs *attrs = damon_sysfs_attrs_alloc();
1729 err = kobject_init_and_add(&attrs->kobj, &damon_sysfs_attrs_ktype,
1730 &context->kobj, "monitoring_attrs");
1733 err = damon_sysfs_attrs_add_dirs(attrs);
1736 context->attrs = attrs;
1740 kobject_put(&attrs->kobj);
1744 static int damon_sysfs_context_set_targets(struct damon_sysfs_context *context)
1746 struct damon_sysfs_targets *targets = damon_sysfs_targets_alloc();
1751 err = kobject_init_and_add(&targets->kobj, &damon_sysfs_targets_ktype,
1752 &context->kobj, "targets");
1754 kobject_put(&targets->kobj);
1757 context->targets = targets;
1761 static int damon_sysfs_context_set_schemes(struct damon_sysfs_context *context)
1763 struct damon_sysfs_schemes *schemes = damon_sysfs_schemes_alloc();
1768 err = kobject_init_and_add(&schemes->kobj, &damon_sysfs_schemes_ktype,
1769 &context->kobj, "schemes");
1771 kobject_put(&schemes->kobj);
1774 context->schemes = schemes;
1778 static int damon_sysfs_context_add_dirs(struct damon_sysfs_context *context)
1782 err = damon_sysfs_context_set_attrs(context);
1786 err = damon_sysfs_context_set_targets(context);
1790 err = damon_sysfs_context_set_schemes(context);
1792 goto put_targets_attrs_out;
1795 put_targets_attrs_out:
1796 kobject_put(&context->targets->kobj);
1797 context->targets = NULL;
1799 kobject_put(&context->attrs->kobj);
1800 context->attrs = NULL;
1804 static void damon_sysfs_context_rm_dirs(struct damon_sysfs_context *context)
1806 damon_sysfs_attrs_rm_dirs(context->attrs);
1807 kobject_put(&context->attrs->kobj);
1808 damon_sysfs_targets_rm_dirs(context->targets);
1809 kobject_put(&context->targets->kobj);
1810 damon_sysfs_schemes_rm_dirs(context->schemes);
1811 kobject_put(&context->schemes->kobj);
1814 static ssize_t avail_operations_show(struct kobject *kobj,
1815 struct kobj_attribute *attr, char *buf)
1817 enum damon_ops_id id;
1820 for (id = 0; id < NR_DAMON_OPS; id++) {
1821 if (!damon_is_registered_ops(id))
1823 len += sysfs_emit_at(buf, len, "%s\n",
1824 damon_sysfs_ops_strs[id]);
1829 static ssize_t operations_show(struct kobject *kobj,
1830 struct kobj_attribute *attr, char *buf)
1832 struct damon_sysfs_context *context = container_of(kobj,
1833 struct damon_sysfs_context, kobj);
1835 return sysfs_emit(buf, "%s\n", damon_sysfs_ops_strs[context->ops_id]);
1838 static ssize_t operations_store(struct kobject *kobj,
1839 struct kobj_attribute *attr, const char *buf, size_t count)
1841 struct damon_sysfs_context *context = container_of(kobj,
1842 struct damon_sysfs_context, kobj);
1843 enum damon_ops_id id;
1845 for (id = 0; id < NR_DAMON_OPS; id++) {
1846 if (sysfs_streq(buf, damon_sysfs_ops_strs[id])) {
1847 context->ops_id = id;
1854 static void damon_sysfs_context_release(struct kobject *kobj)
1856 kfree(container_of(kobj, struct damon_sysfs_context, kobj));
1859 static struct kobj_attribute damon_sysfs_context_avail_operations_attr =
1860 __ATTR_RO_MODE(avail_operations, 0400);
1862 static struct kobj_attribute damon_sysfs_context_operations_attr =
1863 __ATTR_RW_MODE(operations, 0600);
1865 static struct attribute *damon_sysfs_context_attrs[] = {
1866 &damon_sysfs_context_avail_operations_attr.attr,
1867 &damon_sysfs_context_operations_attr.attr,
1870 ATTRIBUTE_GROUPS(damon_sysfs_context);
1872 static struct kobj_type damon_sysfs_context_ktype = {
1873 .release = damon_sysfs_context_release,
1874 .sysfs_ops = &kobj_sysfs_ops,
1875 .default_groups = damon_sysfs_context_groups,
1879 * contexts directory
1882 struct damon_sysfs_contexts {
1883 struct kobject kobj;
1884 struct damon_sysfs_context **contexts_arr;
1888 static struct damon_sysfs_contexts *damon_sysfs_contexts_alloc(void)
1890 return kzalloc(sizeof(struct damon_sysfs_contexts), GFP_KERNEL);
1893 static void damon_sysfs_contexts_rm_dirs(struct damon_sysfs_contexts *contexts)
1895 struct damon_sysfs_context **contexts_arr = contexts->contexts_arr;
1898 for (i = 0; i < contexts->nr; i++) {
1899 damon_sysfs_context_rm_dirs(contexts_arr[i]);
1900 kobject_put(&contexts_arr[i]->kobj);
1903 kfree(contexts_arr);
1904 contexts->contexts_arr = NULL;
1907 static int damon_sysfs_contexts_add_dirs(struct damon_sysfs_contexts *contexts,
1910 struct damon_sysfs_context **contexts_arr, *context;
1913 damon_sysfs_contexts_rm_dirs(contexts);
1917 contexts_arr = kmalloc_array(nr_contexts, sizeof(*contexts_arr),
1918 GFP_KERNEL | __GFP_NOWARN);
1921 contexts->contexts_arr = contexts_arr;
1923 for (i = 0; i < nr_contexts; i++) {
1924 context = damon_sysfs_context_alloc(DAMON_OPS_VADDR);
1926 damon_sysfs_contexts_rm_dirs(contexts);
1930 err = kobject_init_and_add(&context->kobj,
1931 &damon_sysfs_context_ktype, &contexts->kobj,
1936 err = damon_sysfs_context_add_dirs(context);
1940 contexts_arr[i] = context;
1946 damon_sysfs_contexts_rm_dirs(contexts);
1947 kobject_put(&context->kobj);
1951 static ssize_t nr_contexts_show(struct kobject *kobj,
1952 struct kobj_attribute *attr, char *buf)
1954 struct damon_sysfs_contexts *contexts = container_of(kobj,
1955 struct damon_sysfs_contexts, kobj);
1957 return sysfs_emit(buf, "%d\n", contexts->nr);
1960 static ssize_t nr_contexts_store(struct kobject *kobj,
1961 struct kobj_attribute *attr, const char *buf, size_t count)
1963 struct damon_sysfs_contexts *contexts = container_of(kobj,
1964 struct damon_sysfs_contexts, kobj);
1967 err = kstrtoint(buf, 0, &nr);
1970 /* TODO: support multiple contexts per kdamond */
1971 if (nr < 0 || 1 < nr)
1974 if (!mutex_trylock(&damon_sysfs_lock))
1976 err = damon_sysfs_contexts_add_dirs(contexts, nr);
1977 mutex_unlock(&damon_sysfs_lock);
1984 static void damon_sysfs_contexts_release(struct kobject *kobj)
1986 kfree(container_of(kobj, struct damon_sysfs_contexts, kobj));
1989 static struct kobj_attribute damon_sysfs_contexts_nr_attr
1990 = __ATTR_RW_MODE(nr_contexts, 0600);
1992 static struct attribute *damon_sysfs_contexts_attrs[] = {
1993 &damon_sysfs_contexts_nr_attr.attr,
1996 ATTRIBUTE_GROUPS(damon_sysfs_contexts);
1998 static struct kobj_type damon_sysfs_contexts_ktype = {
1999 .release = damon_sysfs_contexts_release,
2000 .sysfs_ops = &kobj_sysfs_ops,
2001 .default_groups = damon_sysfs_contexts_groups,
2008 struct damon_sysfs_kdamond {
2009 struct kobject kobj;
2010 struct damon_sysfs_contexts *contexts;
2011 struct damon_ctx *damon_ctx;
2014 static struct damon_sysfs_kdamond *damon_sysfs_kdamond_alloc(void)
2016 return kzalloc(sizeof(struct damon_sysfs_kdamond), GFP_KERNEL);
2019 static int damon_sysfs_kdamond_add_dirs(struct damon_sysfs_kdamond *kdamond)
2021 struct damon_sysfs_contexts *contexts;
2024 contexts = damon_sysfs_contexts_alloc();
2028 err = kobject_init_and_add(&contexts->kobj,
2029 &damon_sysfs_contexts_ktype, &kdamond->kobj,
2032 kobject_put(&contexts->kobj);
2035 kdamond->contexts = contexts;
2040 static void damon_sysfs_kdamond_rm_dirs(struct damon_sysfs_kdamond *kdamond)
2042 damon_sysfs_contexts_rm_dirs(kdamond->contexts);
2043 kobject_put(&kdamond->contexts->kobj);
2046 static bool damon_sysfs_ctx_running(struct damon_ctx *ctx)
2050 mutex_lock(&ctx->kdamond_lock);
2051 running = ctx->kdamond != NULL;
2052 mutex_unlock(&ctx->kdamond_lock);
2057 * enum damon_sysfs_cmd - Commands for a specific kdamond.
2059 enum damon_sysfs_cmd {
2060 /* @DAMON_SYSFS_CMD_ON: Turn the kdamond on. */
2062 /* @DAMON_SYSFS_CMD_OFF: Turn the kdamond off. */
2063 DAMON_SYSFS_CMD_OFF,
2064 /* @DAMON_SYSFS_CMD_COMMIT: Update kdamond inputs. */
2065 DAMON_SYSFS_CMD_COMMIT,
2067 * @DAMON_SYSFS_CMD_UPDATE_SCHEMES_STATS: Update scheme stats sysfs
2070 DAMON_SYSFS_CMD_UPDATE_SCHEMES_STATS,
2072 * @NR_DAMON_SYSFS_CMDS: Total number of DAMON sysfs commands.
2074 NR_DAMON_SYSFS_CMDS,
2077 /* Should match with enum damon_sysfs_cmd */
2078 static const char * const damon_sysfs_cmd_strs[] = {
2082 "update_schemes_stats",
2086 * struct damon_sysfs_cmd_request - A request to the DAMON callback.
2087 * @cmd: The command that needs to be handled by the callback.
2088 * @kdamond: The kobject wrapper that is associated with the kdamond thread.
2090 * This structure represents a sysfs command request that needs to access some
2091 * DAMON context-internal data. Because DAMON context-internal data can be
2092 * safely accessed from DAMON callbacks without additional synchronization, the
2093 * request will be handled by the DAMON callback. Non-``NULL`` @kdamond means
2094 * the request is valid.
2096 struct damon_sysfs_cmd_request {
2097 enum damon_sysfs_cmd cmd;
2098 struct damon_sysfs_kdamond *kdamond;
2101 /* Current DAMON callback request. Protected by damon_sysfs_lock. */
2102 static struct damon_sysfs_cmd_request damon_sysfs_cmd_request;
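/*
 * Command handling flow (summary): for commands that need access to a running
 * DAMON context, damon_sysfs_handle_cmd() records the command and the target
 * kdamond wrapper here under damon_sysfs_lock, and then polls until
 * damon_sysfs_cmd_request_callback(), invoked from the kdamond, has handled
 * the request and set ->kdamond back to NULL.  For example, writing
 * "update_schemes_stats" to a kdamond's 'state' file refreshes its schemes'
 * 'stats' files this way.
 */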
2104 static ssize_t state_show(struct kobject *kobj, struct kobj_attribute *attr,
2107 struct damon_sysfs_kdamond *kdamond = container_of(kobj,
2108 struct damon_sysfs_kdamond, kobj);
2109 struct damon_ctx *ctx = kdamond->damon_ctx;
2115 running = damon_sysfs_ctx_running(ctx);
2117 return sysfs_emit(buf, "%s\n", running ?
2118 damon_sysfs_cmd_strs[DAMON_SYSFS_CMD_ON] :
2119 damon_sysfs_cmd_strs[DAMON_SYSFS_CMD_OFF]);
2122 static int damon_sysfs_set_attrs(struct damon_ctx *ctx,
2123 struct damon_sysfs_attrs *sys_attrs)
2125 struct damon_sysfs_intervals *sys_intervals = sys_attrs->intervals;
2126 struct damon_sysfs_ul_range *sys_nr_regions =
2127 sys_attrs->nr_regions_range;
2129 return damon_set_attrs(ctx, sys_intervals->sample_us,
2130 sys_intervals->aggr_us, sys_intervals->update_us,
2131 sys_nr_regions->min, sys_nr_regions->max);
2134 static void damon_sysfs_destroy_targets(struct damon_ctx *ctx)
2136 struct damon_target *t, *next;
2138 damon_for_each_target_safe(t, next, ctx) {
2139 if (ctx->ops.id == DAMON_OPS_VADDR ||
2140 ctx->ops.id == DAMON_OPS_FVADDR)
2142 damon_destroy_target(t);
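/*
 * Convert the sysfs regions into a damon_addr_range array.  Each region must
 * have start <= end, and the regions must be sorted and non-overlapping.
 */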
2146 static int damon_sysfs_set_regions(struct damon_target *t,
2147 struct damon_sysfs_regions *sysfs_regions)
2149 struct damon_addr_range *ranges = kmalloc_array(sysfs_regions->nr,
2150 sizeof(*ranges), GFP_KERNEL | __GFP_NOWARN);
2151 int i, err = -EINVAL;
2155 for (i = 0; i < sysfs_regions->nr; i++) {
2156 struct damon_sysfs_region *sys_region =
2157 sysfs_regions->regions_arr[i];
2159 if (sys_region->start > sys_region->end)
2162 ranges[i].start = sys_region->start;
2163 ranges[i].end = sys_region->end;
2166 if (ranges[i - 1].end > ranges[i].start)
2169 err = damon_set_regions(t, ranges, sysfs_regions->nr);
2176 static int damon_sysfs_add_target(struct damon_sysfs_target *sys_target,
2177 struct damon_ctx *ctx)
2179 struct damon_target *t = damon_new_target();
2184 if (ctx->ops.id == DAMON_OPS_VADDR ||
2185 ctx->ops.id == DAMON_OPS_FVADDR) {
2186 t->pid = find_get_pid(sys_target->pid);
2188 goto destroy_targets_out;
2190 damon_add_target(ctx, t);
2191 err = damon_sysfs_set_regions(t, sys_target->regions);
2193 goto destroy_targets_out;
2196 destroy_targets_out:
2197 damon_sysfs_destroy_targets(ctx);
2202 * Search for a target in a context that corresponds to the sysfs target input.
2204 * Return: pointer to the target if found, NULL if not found, or negative
2205 * error code if the search failed.
2207 static struct damon_target *damon_sysfs_existing_target(
2208 struct damon_sysfs_target *sys_target, struct damon_ctx *ctx)
2211 struct damon_target *t;
2213 if (ctx->ops.id == DAMON_OPS_PADDR) {
2214 /* At most one target can exist for paddr */
2215 damon_for_each_target(t, ctx)
2220 /* ops.id should be DAMON_OPS_VADDR or DAMON_OPS_FVADDR */
2221 pid = find_get_pid(sys_target->pid);
2223 return ERR_PTR(-EINVAL);
2224 damon_for_each_target(t, ctx) {
2225 if (t->pid == pid) {
2234 static int damon_sysfs_set_targets(struct damon_ctx *ctx,
2235 struct damon_sysfs_targets *sysfs_targets)
2239 /* Multiple physical address space monitoring targets make no sense */
2240 if (ctx->ops.id == DAMON_OPS_PADDR && sysfs_targets->nr > 1)
2243 for (i = 0; i < sysfs_targets->nr; i++) {
2244 struct damon_sysfs_target *st = sysfs_targets->targets_arr[i];
2245 struct damon_target *t = damon_sysfs_existing_target(st, ctx);
2250 err = damon_sysfs_add_target(st, ctx);
2252 err = damon_sysfs_set_regions(t, st->regions);
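/*
 * Translate one sysfs scheme directory (access_pattern, quotas, weights, and
 * watermarks) into a struct damos for the DAMON context.
 */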
2259 static struct damos *damon_sysfs_mk_scheme(
2260 struct damon_sysfs_scheme *sysfs_scheme)
2262 struct damon_sysfs_access_pattern *pattern =
2263 sysfs_scheme->access_pattern;
2264 struct damon_sysfs_quotas *sysfs_quotas = sysfs_scheme->quotas;
2265 struct damon_sysfs_weights *sysfs_weights = sysfs_quotas->weights;
2266 struct damon_sysfs_watermarks *sysfs_wmarks = sysfs_scheme->watermarks;
2267 struct damos_quota quota = {
2268 .ms = sysfs_quotas->ms,
2269 .sz = sysfs_quotas->sz,
2270 .reset_interval = sysfs_quotas->reset_interval_ms,
2271 .weight_sz = sysfs_weights->sz,
2272 .weight_nr_accesses = sysfs_weights->nr_accesses,
2273 .weight_age = sysfs_weights->age,
2275 struct damos_watermarks wmarks = {
2276 .metric = sysfs_wmarks->metric,
2277 .interval = sysfs_wmarks->interval_us,
2278 .high = sysfs_wmarks->high,
2279 .mid = sysfs_wmarks->mid,
2280 .low = sysfs_wmarks->low,
2283 return damon_new_scheme(pattern->sz->min, pattern->sz->max,
2284 pattern->nr_accesses->min, pattern->nr_accesses->max,
2285 pattern->age->min, pattern->age->max,
2286 sysfs_scheme->action, &quota, &wmarks);
2289 static int damon_sysfs_set_schemes(struct damon_ctx *ctx,
2290 struct damon_sysfs_schemes *sysfs_schemes)
2294 for (i = 0; i < sysfs_schemes->nr; i++) {
2295 struct damos *scheme, *next;
2297 scheme = damon_sysfs_mk_scheme(sysfs_schemes->schemes_arr[i]);
2299 damon_for_each_scheme_safe(scheme, next, ctx)
2300 damon_destroy_scheme(scheme);
2303 damon_add_scheme(ctx, scheme);
2308 static void damon_sysfs_before_terminate(struct damon_ctx *ctx)
2310 struct damon_target *t, *next;
2312 if (ctx->ops.id != DAMON_OPS_VADDR && ctx->ops.id != DAMON_OPS_FVADDR)
2315 mutex_lock(&ctx->kdamond_lock);
2316 damon_for_each_target_safe(t, next, ctx) {
2318 damon_destroy_target(t);
2320 mutex_unlock(&ctx->kdamond_lock);
2324 * damon_sysfs_upd_schemes_stats() - Update schemes stats sysfs files.
2325 * @kdamond: The kobject wrapper that is associated with the kdamond thread.
2327 * This function reads the schemes stats of a specific kdamond and updates the
2328 * related values for sysfs files. This function should be called from DAMON
2329 * callbacks while holding ``damon_sysfs_lock``, to safely access the DAMON
2330 * context-internal data and DAMON sysfs variables.
2332 static int damon_sysfs_upd_schemes_stats(struct damon_sysfs_kdamond *kdamond)
2334 struct damon_ctx *ctx = kdamond->damon_ctx;
2335 struct damon_sysfs_schemes *sysfs_schemes;
2336 struct damos *scheme;
2337 int schemes_idx = 0;
2341 sysfs_schemes = kdamond->contexts->contexts_arr[0]->schemes;
2342 damon_for_each_scheme(scheme, ctx) {
2343 struct damon_sysfs_stats *sysfs_stats;
2345 sysfs_stats = sysfs_schemes->schemes_arr[schemes_idx++]->stats;
2346 sysfs_stats->nr_tried = scheme->stat.nr_tried;
2347 sysfs_stats->sz_tried = scheme->stat.sz_tried;
2348 sysfs_stats->nr_applied = scheme->stat.nr_applied;
2349 sysfs_stats->sz_applied = scheme->stat.sz_applied;
2350 sysfs_stats->qt_exceeds = scheme->stat.qt_exceeds;
2355 static inline bool damon_sysfs_kdamond_running(
2356 struct damon_sysfs_kdamond *kdamond)
2358 return kdamond->damon_ctx &&
2359 damon_sysfs_ctx_running(kdamond->damon_ctx);
2363 * damon_sysfs_commit_input() - Commit user inputs to a running kdamond.
2364 * @kdamond: The kobject wrapper for the associated kdamond.
2366 * If the sysfs input is wrong, the kdamond will be terminated.
2368 static int damon_sysfs_commit_input(struct damon_sysfs_kdamond *kdamond)
2370 struct damon_ctx *ctx = kdamond->damon_ctx;
2371 struct damon_sysfs_context *sys_ctx;
2374 if (!damon_sysfs_kdamond_running(kdamond))
2376 /* TODO: Support multiple contexts per kdamond */
2377 if (kdamond->contexts->nr != 1)
2380 sys_ctx = kdamond->contexts->contexts_arr[0];
2382 err = damon_select_ops(ctx, sys_ctx->ops_id);
2385 err = damon_sysfs_set_attrs(ctx, sys_ctx->attrs);
2388 err = damon_sysfs_set_targets(ctx, sys_ctx->targets);
2391 err = damon_sysfs_set_schemes(ctx, sys_ctx->schemes);
2398 * damon_sysfs_cmd_request_callback() - DAMON callback for handling requests.
2399 * @c: The DAMON context of the callback.
2401 * This function is periodically called back from the kdamond thread for @c.
2402 * Then, it checks if there is a waiting DAMON sysfs request and handles it.
2404 static int damon_sysfs_cmd_request_callback(struct damon_ctx *c)
2406 struct damon_sysfs_kdamond *kdamond;
2409 /* avoid deadlock due to concurrent state_store('off') */
2410 if (!mutex_trylock(&damon_sysfs_lock))
2412 kdamond = damon_sysfs_cmd_request.kdamond;
2413 if (!kdamond || kdamond->damon_ctx != c)
2415 switch (damon_sysfs_cmd_request.cmd) {
2416 case DAMON_SYSFS_CMD_UPDATE_SCHEMES_STATS:
2417 err = damon_sysfs_upd_schemes_stats(kdamond);
2419 case DAMON_SYSFS_CMD_COMMIT:
2420 err = damon_sysfs_commit_input(kdamond);
2425 /* Mark the request as invalid now. */
2426 damon_sysfs_cmd_request.kdamond = NULL;
2428 mutex_unlock(&damon_sysfs_lock);
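/*
 * Build a new damon_ctx from the sysfs inputs: operations set, monitoring
 * attributes, targets, and schemes, and hook up the sysfs callbacks.
 */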
2432 static struct damon_ctx *damon_sysfs_build_ctx(
2433 struct damon_sysfs_context *sys_ctx)
2435 struct damon_ctx *ctx = damon_new_ctx();
2439 return ERR_PTR(-ENOMEM);
2441 err = damon_select_ops(ctx, sys_ctx->ops_id);
2444 err = damon_sysfs_set_attrs(ctx, sys_ctx->attrs);
2447 err = damon_sysfs_set_targets(ctx, sys_ctx->targets);
2450 err = damon_sysfs_set_schemes(ctx, sys_ctx->schemes);
2454 ctx->callback.after_wmarks_check = damon_sysfs_cmd_request_callback;
2455 ctx->callback.after_aggregation = damon_sysfs_cmd_request_callback;
2456 ctx->callback.before_terminate = damon_sysfs_before_terminate;
2460 damon_destroy_ctx(ctx);
2461 return ERR_PTR(err);
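/*
 * Start a kdamond for the given wrapper: any stale damon_ctx is destroyed, a
 * fresh one is built from the sysfs inputs, and damon_start() launches it.
 */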
static int damon_sysfs_turn_damon_on(struct damon_sysfs_kdamond *kdamond)
{
	struct damon_ctx *ctx;
	int err;

	if (kdamond->damon_ctx &&
			damon_sysfs_ctx_running(kdamond->damon_ctx))
		return -EBUSY;
	if (damon_sysfs_cmd_request.kdamond == kdamond)
		return -EBUSY;
	/* TODO: support multiple contexts per kdamond */
	if (kdamond->contexts->nr != 1)
		return -EINVAL;

	if (kdamond->damon_ctx)
		damon_destroy_ctx(kdamond->damon_ctx);
	kdamond->damon_ctx = NULL;

	ctx = damon_sysfs_build_ctx(kdamond->contexts->contexts_arr[0]);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);
	err = damon_start(&ctx, 1, false);
	if (err) {
		damon_destroy_ctx(ctx);
		return err;
	}
	kdamond->damon_ctx = ctx;
	return err;
}

static int damon_sysfs_turn_damon_off(struct damon_sysfs_kdamond *kdamond)
{
	if (!kdamond->damon_ctx)
		return -EINVAL;
	return damon_stop(&kdamond->damon_ctx, 1);
	/*
	 * To let users read the final monitoring results of an already
	 * turned-off DAMON, kdamond->damon_ctx is freed in the next
	 * damon_sysfs_turn_damon_on() or nr_kdamonds_store().
	 */
}

/*
 * damon_sysfs_handle_cmd() - Handle a command for a specific kdamond.
 * @cmd:	The command to handle.
 * @kdamond:	The kobject wrapper for the associated kdamond.
 *
 * This function handles a DAMON sysfs command for a kdamond.  For commands
 * that need to access running DAMON context-internal data, it requests
 * handling of the command to the DAMON callback
 * (@damon_sysfs_cmd_request_callback()) and waits until it is properly
 * handled, or the context is completed.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int damon_sysfs_handle_cmd(enum damon_sysfs_cmd cmd,
		struct damon_sysfs_kdamond *kdamond)
{
	bool need_wait = true;

	/* Handle commands that don't access DAMON context-internal data */
	switch (cmd) {
	case DAMON_SYSFS_CMD_ON:
		return damon_sysfs_turn_damon_on(kdamond);
	case DAMON_SYSFS_CMD_OFF:
		return damon_sysfs_turn_damon_off(kdamond);
	default:
		break;
	}

	/* Pass the command to the DAMON callback for safe DAMON context access */
	if (damon_sysfs_cmd_request.kdamond)
		return -EBUSY;
	if (!damon_sysfs_kdamond_running(kdamond))
		return -EINVAL;
	damon_sysfs_cmd_request.cmd = cmd;
	damon_sysfs_cmd_request.kdamond = kdamond;

	/*
	 * Wait until damon_sysfs_cmd_request_callback() handles the request
	 * from the kdamond context.
	 */
	mutex_unlock(&damon_sysfs_lock);
	while (need_wait) {
		schedule_timeout_idle(msecs_to_jiffies(100));
		if (!mutex_trylock(&damon_sysfs_lock))
			continue;
		if (!damon_sysfs_cmd_request.kdamond) {
			/* damon_sysfs_cmd_request_callback() handled it */
			need_wait = false;
		} else if (!damon_sysfs_kdamond_running(kdamond)) {
			/* the kdamond has already finished */
			need_wait = false;
			damon_sysfs_cmd_request.kdamond = NULL;
		}
		mutex_unlock(&damon_sysfs_lock);
	}
	mutex_lock(&damon_sysfs_lock);
	return 0;
}

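/*
 * Locking note (editorial): state_store() below calls this function with
 * damon_sysfs_lock held.  For callback-delegated commands the lock is
 * dropped while polling so that damon_sysfs_cmd_request_callback() can take
 * it, and re-acquired before returning so the caller's final unlock stays
 * balanced.
 */
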
static ssize_t state_store(struct kobject *kobj, struct kobj_attribute *attr,
		const char *buf, size_t count)
{
	struct damon_sysfs_kdamond *kdamond = container_of(kobj,
			struct damon_sysfs_kdamond, kobj);
	enum damon_sysfs_cmd cmd;
	ssize_t ret = -EINVAL;

	if (!mutex_trylock(&damon_sysfs_lock))
		return -EBUSY;
	for (cmd = 0; cmd < NR_DAMON_SYSFS_CMDS; cmd++) {
		if (sysfs_streq(buf, damon_sysfs_cmd_strs[cmd])) {
			ret = damon_sysfs_handle_cmd(cmd, kdamond);
			break;
		}
	}
	mutex_unlock(&damon_sysfs_lock);
	if (!ret)
		ret = count;
	return ret;
}

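/*
 * Illustrative usage (editorial note, not part of the build): the file
 * backed by state_store() lives under each per-kdamond directory, and
 * writing one of the strings in damon_sysfs_cmd_strs[] issues the matching
 * command, e.g.:
 *
 *	echo on > /sys/kernel/mm/damon/admin/kdamonds/0/state
 *	echo off > /sys/kernel/mm/damon/admin/kdamonds/0/state
 *
 * "on" and "off" here assume the conventional names for DAMON_SYSFS_CMD_ON
 * and DAMON_SYSFS_CMD_OFF; the exact strings are defined with the
 * damon_sysfs_cmd enum earlier in this file.
 */
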
static ssize_t pid_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	struct damon_sysfs_kdamond *kdamond = container_of(kobj,
			struct damon_sysfs_kdamond, kobj);
	struct damon_ctx *ctx;
	int pid;

	if (!mutex_trylock(&damon_sysfs_lock))
		return -EBUSY;
	ctx = kdamond->damon_ctx;
	if (!ctx) {
		pid = -1;
		goto out;
	}
	mutex_lock(&ctx->kdamond_lock);
	if (!ctx->kdamond)
		pid = -1;
	else
		pid = ctx->kdamond->pid;
	mutex_unlock(&ctx->kdamond_lock);
out:
	mutex_unlock(&damon_sysfs_lock);
	return sysfs_emit(buf, "%d\n", pid);
}

static void damon_sysfs_kdamond_release(struct kobject *kobj)
{
	struct damon_sysfs_kdamond *kdamond = container_of(kobj,
			struct damon_sysfs_kdamond, kobj);

	if (kdamond->damon_ctx)
		damon_destroy_ctx(kdamond->damon_ctx);
	kfree(kdamond);
}

static struct kobj_attribute damon_sysfs_kdamond_state_attr =
		__ATTR_RW_MODE(state, 0600);

static struct kobj_attribute damon_sysfs_kdamond_pid_attr =
		__ATTR_RO_MODE(pid, 0400);

static struct attribute *damon_sysfs_kdamond_attrs[] = {
	&damon_sysfs_kdamond_state_attr.attr,
	&damon_sysfs_kdamond_pid_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_kdamond);

static struct kobj_type damon_sysfs_kdamond_ktype = {
	.release = damon_sysfs_kdamond_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_kdamond_groups,
};

/*
 * kdamonds directory
 */

struct damon_sysfs_kdamonds {
	struct kobject kobj;
	struct damon_sysfs_kdamond **kdamonds_arr;
	int nr;
};

static struct damon_sysfs_kdamonds *damon_sysfs_kdamonds_alloc(void)
{
	return kzalloc(sizeof(struct damon_sysfs_kdamonds), GFP_KERNEL);
}

static void damon_sysfs_kdamonds_rm_dirs(struct damon_sysfs_kdamonds *kdamonds)
{
	struct damon_sysfs_kdamond **kdamonds_arr = kdamonds->kdamonds_arr;
	int i;

	for (i = 0; i < kdamonds->nr; i++) {
		damon_sysfs_kdamond_rm_dirs(kdamonds_arr[i]);
		kobject_put(&kdamonds_arr[i]->kobj);
	}
	kdamonds->nr = 0;
	kfree(kdamonds_arr);
	kdamonds->kdamonds_arr = NULL;
}

static int damon_sysfs_nr_running_ctxs(struct damon_sysfs_kdamond **kdamonds,
		int nr_kdamonds)
{
	int nr_running_ctxs = 0;
	int i;

	for (i = 0; i < nr_kdamonds; i++) {
		struct damon_ctx *ctx = kdamonds[i]->damon_ctx;

		if (!ctx)
			continue;
		mutex_lock(&ctx->kdamond_lock);
		if (ctx->kdamond)
			nr_running_ctxs++;
		mutex_unlock(&ctx->kdamond_lock);
	}
	return nr_running_ctxs;
}

static int damon_sysfs_kdamonds_add_dirs(struct damon_sysfs_kdamonds *kdamonds,
		int nr_kdamonds)
{
	struct damon_sysfs_kdamond **kdamonds_arr, *kdamond;
	int err, i;

	if (damon_sysfs_nr_running_ctxs(kdamonds->kdamonds_arr, kdamonds->nr))
		return -EBUSY;

	for (i = 0; i < kdamonds->nr; i++) {
		if (damon_sysfs_cmd_request.kdamond ==
				kdamonds->kdamonds_arr[i])
			return -EBUSY;
	}

	damon_sysfs_kdamonds_rm_dirs(kdamonds);
	if (!nr_kdamonds)
		return 0;

	kdamonds_arr = kmalloc_array(nr_kdamonds, sizeof(*kdamonds_arr),
			GFP_KERNEL | __GFP_NOWARN);
	if (!kdamonds_arr)
		return -ENOMEM;
	kdamonds->kdamonds_arr = kdamonds_arr;

	for (i = 0; i < nr_kdamonds; i++) {
		kdamond = damon_sysfs_kdamond_alloc();
		if (!kdamond) {
			damon_sysfs_kdamonds_rm_dirs(kdamonds);
			return -ENOMEM;
		}

		err = kobject_init_and_add(&kdamond->kobj,
				&damon_sysfs_kdamond_ktype, &kdamonds->kobj,
				"%d", i);
		if (err)
			goto out;

		err = damon_sysfs_kdamond_add_dirs(kdamond);
		if (err)
			goto out;

		kdamonds_arr[i] = kdamond;
		kdamonds->nr++;
	}
	return 0;

out:
	damon_sysfs_kdamonds_rm_dirs(kdamonds);
	kobject_put(&kdamond->kobj);
	return err;
}

static ssize_t nr_kdamonds_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	struct damon_sysfs_kdamonds *kdamonds = container_of(kobj,
			struct damon_sysfs_kdamonds, kobj);

	return sysfs_emit(buf, "%d\n", kdamonds->nr);
}

static ssize_t nr_kdamonds_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	struct damon_sysfs_kdamonds *kdamonds = container_of(kobj,
			struct damon_sysfs_kdamonds, kobj);
	int nr, err;

	err = kstrtoint(buf, 0, &nr);
	if (err)
		return err;
	if (nr < 0)
		return -EINVAL;

	if (!mutex_trylock(&damon_sysfs_lock))
		return -EBUSY;
	err = damon_sysfs_kdamonds_add_dirs(kdamonds, nr);
	mutex_unlock(&damon_sysfs_lock);
	if (err)
		return err;

	return count;
}

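/*
 * Illustrative usage (editorial note, not part of the build): writing a
 * number to the nr_kdamonds file creates that many numbered kdamond
 * directories, each carrying the state and pid files defined above, e.g.:
 *
 *	echo 2 > /sys/kernel/mm/damon/admin/kdamonds/nr_kdamonds
 *	ls /sys/kernel/mm/damon/admin/kdamonds	# 0  1  nr_kdamonds
 *
 * The write fails with -EBUSY while any of the kdamonds is running or has a
 * pending sysfs command request, per the checks in
 * damon_sysfs_kdamonds_add_dirs().
 */
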
static void damon_sysfs_kdamonds_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct damon_sysfs_kdamonds, kobj));
}

static struct kobj_attribute damon_sysfs_kdamonds_nr_attr =
		__ATTR_RW_MODE(nr_kdamonds, 0600);

static struct attribute *damon_sysfs_kdamonds_attrs[] = {
	&damon_sysfs_kdamonds_nr_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_kdamonds);

static struct kobj_type damon_sysfs_kdamonds_ktype = {
	.release = damon_sysfs_kdamonds_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_kdamonds_groups,
};

/*
 * damon user interface directory
 */

struct damon_sysfs_ui_dir {
	struct kobject kobj;
	struct damon_sysfs_kdamonds *kdamonds;
};

static struct damon_sysfs_ui_dir *damon_sysfs_ui_dir_alloc(void)
{
	return kzalloc(sizeof(struct damon_sysfs_ui_dir), GFP_KERNEL);
}

static int damon_sysfs_ui_dir_add_dirs(struct damon_sysfs_ui_dir *ui_dir)
{
	struct damon_sysfs_kdamonds *kdamonds;
	int err;

	kdamonds = damon_sysfs_kdamonds_alloc();
	if (!kdamonds)
		return -ENOMEM;

	err = kobject_init_and_add(&kdamonds->kobj,
			&damon_sysfs_kdamonds_ktype, &ui_dir->kobj,
			"kdamonds");
	if (err) {
		kobject_put(&kdamonds->kobj);
		return err;
	}
	ui_dir->kdamonds = kdamonds;
	return 0;
}

static void damon_sysfs_ui_dir_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct damon_sysfs_ui_dir, kobj));
}

static struct attribute *damon_sysfs_ui_dir_attrs[] = {
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_ui_dir);

static struct kobj_type damon_sysfs_ui_dir_ktype = {
	.release = damon_sysfs_ui_dir_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_ui_dir_groups,
};

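/*
 * Resulting layout (editorial note, derived from the kobject names used in
 * this file): with sysfs mounted at /sys, mm_kobj maps to /sys/kernel/mm, so
 * damon_sysfs_init() below ends up building
 *
 *	/sys/kernel/mm/damon/admin/kdamonds/nr_kdamonds
 *	/sys/kernel/mm/damon/admin/kdamonds/<N>/state
 *	/sys/kernel/mm/damon/admin/kdamonds/<N>/pid
 *
 * with the per-kdamond subdirectories populated by
 * damon_sysfs_kdamond_add_dirs(), defined earlier in this file.
 */
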
static int __init damon_sysfs_init(void)
{
	struct kobject *damon_sysfs_root;
	struct damon_sysfs_ui_dir *admin;
	int err;

	damon_sysfs_root = kobject_create_and_add("damon", mm_kobj);
	if (!damon_sysfs_root)
		return -ENOMEM;

	admin = damon_sysfs_ui_dir_alloc();
	if (!admin) {
		kobject_put(damon_sysfs_root);
		return -ENOMEM;
	}
	err = kobject_init_and_add(&admin->kobj, &damon_sysfs_ui_dir_ktype,
			damon_sysfs_root, "admin");
	if (err)
		goto out;
	err = damon_sysfs_ui_dir_add_dirs(admin);
	if (err)
		goto out;
	return 0;

out:
	kobject_put(&admin->kobj);
	kobject_put(damon_sysfs_root);
	return err;
}
subsys_initcall(damon_sysfs_init);