// SPDX-License-Identifier: GPL-2.0
/*
 * Data Access Monitor
 */

#define pr_fmt(fmt) "damon: " fmt

#include <linux/damon.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/random.h>
#include <linux/slab.h>

#define CREATE_TRACE_POINTS
#include <trace/events/damon.h>

#ifdef CONFIG_DAMON_KUNIT_TEST
#undef DAMON_MIN_REGION
#define DAMON_MIN_REGION 1
#endif

/* Get a random number in [l, r) */
#define damon_rand(l, r) (l + prandom_u32_max(r - l))
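
/*
 * For instance, damon_rand(1, 10) evaluates to a pseudo-random value in
 * [1, 10), i.e. 1 to 9 inclusive; the region split logic below relies on
 * this for picking random sub-region sizes.
 */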

static DEFINE_MUTEX(damon_lock);
static int nr_running_ctxs;

/*
 * Construct a damon_region struct
 *
 * Returns the pointer to the new struct if success, or NULL otherwise
 */
struct damon_region *damon_new_region(unsigned long start, unsigned long end)
{
	struct damon_region *region;

	region = kmalloc(sizeof(*region), GFP_KERNEL);
	if (!region)
		return NULL;

	region->ar.start = start;
	region->ar.end = end;
	region->nr_accesses = 0;
	INIT_LIST_HEAD(&region->list);

	return region;
}

/*
 * Add a region between two other regions
 */
inline void damon_insert_region(struct damon_region *r,
		struct damon_region *prev, struct damon_region *next,
		struct damon_target *t)
{
	__list_add(&r->list, &prev->list, &next->list);
	t->nr_regions++;
}

void damon_add_region(struct damon_region *r, struct damon_target *t)
{
	list_add_tail(&r->list, &t->regions_list);
	t->nr_regions++;
}
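
/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 * construct a region covering [start, end) and link it to a target.
 *
 *	struct damon_region *r = damon_new_region(start, end);
 *
 *	if (r)
 *		damon_add_region(r, t);
 */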

static void damon_del_region(struct damon_region *r, struct damon_target *t)
{
	list_del(&r->list);
	t->nr_regions--;
}

static void damon_free_region(struct damon_region *r)
{
	kfree(r);
}

void damon_destroy_region(struct damon_region *r, struct damon_target *t)
{
	damon_del_region(r, t);
	damon_free_region(r);
}

/*
 * Construct a damon_target struct
 *
 * Returns the pointer to the new struct if success, or NULL otherwise
 */
struct damon_target *damon_new_target(unsigned long id)
{
	struct damon_target *t;

	t = kmalloc(sizeof(*t), GFP_KERNEL);
	if (!t)
		return NULL;

	t->id = id;
	t->nr_regions = 0;
	INIT_LIST_HEAD(&t->regions_list);

	return t;
}

void damon_add_target(struct damon_ctx *ctx, struct damon_target *t)
{
	list_add_tail(&t->list, &ctx->adaptive_targets);
}

static void damon_del_target(struct damon_target *t)
{
	list_del(&t->list);
}

void damon_free_target(struct damon_target *t)
{
	struct damon_region *r, *next;

	damon_for_each_region_safe(r, next, t)
		damon_free_region(r);
	kfree(t);
}

void damon_destroy_target(struct damon_target *t)
{
	damon_del_target(t);
	damon_free_target(t);
}

unsigned int damon_nr_regions(struct damon_target *t)
{
	return t->nr_regions;
}

struct damon_ctx *damon_new_ctx(void)
{
	struct damon_ctx *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	ctx->sample_interval = 5 * 1000;
	ctx->aggr_interval = 100 * 1000;
	ctx->primitive_update_interval = 60 * 1000 * 1000;

	ktime_get_coarse_ts64(&ctx->last_aggregation);
	ctx->last_primitive_update = ctx->last_aggregation;

	mutex_init(&ctx->kdamond_lock);

	ctx->min_nr_regions = 10;
	ctx->max_nr_regions = 1000;

	INIT_LIST_HEAD(&ctx->adaptive_targets);

	return ctx;
}
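
/*
 * Since every interval is in microseconds, the defaults above mean
 * sampling every 5 ms, aggregating every 100 ms, and updating the
 * monitoring primitives every 60 s.
 */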

static void damon_destroy_targets(struct damon_ctx *ctx)
{
	struct damon_target *t, *next_t;

	if (ctx->primitive.cleanup) {
		ctx->primitive.cleanup(ctx);
		return;
	}

	damon_for_each_target_safe(t, next_t, ctx)
		damon_destroy_target(t);
}

void damon_destroy_ctx(struct damon_ctx *ctx)
{
	damon_destroy_targets(ctx);
	kfree(ctx);
}

/**
 * damon_set_targets() - Set monitoring targets.
 * @ctx:	monitoring context
 * @ids:	array of target ids
 * @nr_ids:	number of entries in @ids
 *
 * This function should not be called while the kdamond is running.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_set_targets(struct damon_ctx *ctx,
		      unsigned long *ids, ssize_t nr_ids)
{
	ssize_t i;
	struct damon_target *t, *next;

	damon_destroy_targets(ctx);

	for (i = 0; i < nr_ids; i++) {
		t = damon_new_target(ids[i]);
		if (!t) {
			pr_err("Failed to alloc damon_target\n");
			/* The caller should do cleanup of the ids itself */
			damon_for_each_target_safe(t, next, ctx)
				damon_destroy_target(t);
			return -ENOMEM;
		}
		damon_add_target(ctx, t);
	}

	return 0;
}

/**
 * damon_set_attrs() - Set attributes for the monitoring.
 * @ctx:		monitoring context
 * @sample_int:		time interval between samplings
 * @aggr_int:		time interval between aggregations
 * @primitive_upd_int:	time interval between monitoring primitive updates
 * @min_nr_reg:		minimal number of regions
 * @max_nr_reg:		maximum number of regions
 *
 * This function should not be called while the kdamond is running.
 * Every time interval is in microseconds.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_set_attrs(struct damon_ctx *ctx, unsigned long sample_int,
		    unsigned long aggr_int, unsigned long primitive_upd_int,
		    unsigned long min_nr_reg, unsigned long max_nr_reg)
{
	if (min_nr_reg < 3) {
		pr_err("min_nr_regions (%lu) must be at least 3\n",
				min_nr_reg);
		return -EINVAL;
	}
	if (min_nr_reg > max_nr_reg) {
		pr_err("invalid nr_regions. min (%lu) > max (%lu)\n",
				min_nr_reg, max_nr_reg);
		return -EINVAL;
	}

	ctx->sample_interval = sample_int;
	ctx->aggr_interval = aggr_int;
	ctx->primitive_update_interval = primitive_upd_int;
	ctx->min_nr_regions = min_nr_reg;
	ctx->max_nr_regions = max_nr_reg;

	return 0;
}
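
/*
 * A minimal end-to-end setup sketch (hypothetical caller; 'pids' and
 * 'nr_pids' are assumptions, not part of this file):
 *
 *	struct damon_ctx *ctx = damon_new_ctx();
 *	int err = -ENOMEM;
 *
 *	if (ctx && !damon_set_attrs(ctx, 5000, 100000, 60000000, 10, 1000) &&
 *	    !damon_set_targets(ctx, pids, nr_pids))
 *		err = damon_start(&ctx, 1);
 */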

/**
 * damon_nr_running_ctxs() - Return number of currently running contexts.
 */
int damon_nr_running_ctxs(void)
{
	int nr_ctxs;

	mutex_lock(&damon_lock);
	nr_ctxs = nr_running_ctxs;
	mutex_unlock(&damon_lock);

	return nr_ctxs;
}

/* Returns the size upper limit for each monitoring region */
static unsigned long damon_region_sz_limit(struct damon_ctx *ctx)
{
	struct damon_target *t;
	struct damon_region *r;
	unsigned long sz = 0;

	damon_for_each_target(t, ctx) {
		damon_for_each_region(r, t)
			sz += r->ar.end - r->ar.start;
	}

	if (ctx->min_nr_regions)
		sz /= ctx->min_nr_regions;
	if (sz < DAMON_MIN_REGION)
		sz = DAMON_MIN_REGION;

	return sz;
}
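
/*
 * For example, with 300 MiB of total monitored address space and the
 * default min_nr_regions of 10, the limit computed above is 30 MiB: the
 * merge logic below never grows a region beyond that size.
 */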

static bool damon_kdamond_running(struct damon_ctx *ctx)
{
	bool running;

	mutex_lock(&ctx->kdamond_lock);
	running = ctx->kdamond != NULL;
	mutex_unlock(&ctx->kdamond_lock);

	return running;
}

static int kdamond_fn(void *data);

/*
 * __damon_start() - Starts monitoring with given context.
 * @ctx:	monitoring context
 *
 * This function should be called while damon_lock is held.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int __damon_start(struct damon_ctx *ctx)
{
	int err = -EBUSY;

	mutex_lock(&ctx->kdamond_lock);
	if (!ctx->kdamond) {
		err = 0;
		ctx->kdamond_stop = false;
		ctx->kdamond = kthread_run(kdamond_fn, ctx, "kdamond.%d",
				nr_running_ctxs);
		if (IS_ERR(ctx->kdamond)) {
			err = PTR_ERR(ctx->kdamond);
			ctx->kdamond = NULL;
		}
	}
	mutex_unlock(&ctx->kdamond_lock);

	return err;
}

/**
 * damon_start() - Starts monitoring for a given group of contexts.
 * @ctxs:	an array of the pointers for contexts to start monitoring
 * @nr_ctxs:	size of @ctxs
 *
 * This function starts a group of monitoring threads for a group of
 * monitoring contexts.  One thread per context is created and run in
 * parallel.  The caller should handle synchronization between the threads
 * by itself.  If a group of threads that were created by another
 * 'damon_start()' call is currently running, this function does nothing but
 * returns -EBUSY.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_start(struct damon_ctx **ctxs, int nr_ctxs)
{
	int i;
	int err = 0;

	mutex_lock(&damon_lock);
	if (nr_running_ctxs) {
		mutex_unlock(&damon_lock);
		return -EBUSY;
	}

	for (i = 0; i < nr_ctxs; i++) {
		err = __damon_start(ctxs[i]);
		if (err)
			break;
		nr_running_ctxs++;
	}
	mutex_unlock(&damon_lock);

	return err;
}

/*
 * __damon_stop() - Stops monitoring of given context.
 * @ctx:	monitoring context
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int __damon_stop(struct damon_ctx *ctx)
{
	mutex_lock(&ctx->kdamond_lock);
	if (ctx->kdamond) {
		ctx->kdamond_stop = true;
		mutex_unlock(&ctx->kdamond_lock);
		while (damon_kdamond_running(ctx))
			usleep_range(ctx->sample_interval,
					ctx->sample_interval * 2);
		return 0;
	}
	mutex_unlock(&ctx->kdamond_lock);

	return -EPERM;
}

/**
 * damon_stop() - Stops monitoring for a given group of contexts.
 * @ctxs:	an array of the pointers for contexts to stop monitoring
 * @nr_ctxs:	size of @ctxs
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_stop(struct damon_ctx **ctxs, int nr_ctxs)
{
	int i, err = 0;

	for (i = 0; i < nr_ctxs; i++) {
		/* nr_running_ctxs is decremented in kdamond_fn */
		err = __damon_stop(ctxs[i]);
		if (err)
			return err;
	}

	return err;
}
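
/*
 * Tearing down mirrors the setup sketch above (hypothetical caller):
 *
 *	damon_stop(&ctx, 1);
 *	damon_destroy_ctx(ctx);
 *
 * __damon_stop() polls until the kdamond clears ctx->kdamond, so the
 * context is presumably safe to destroy afterwards.
 */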

/**
 * damon_check_reset_time_interval() - Check if a time interval is elapsed.
 * @baseline:	the time to check whether the interval has elapsed since
 * @interval:	the time interval (microseconds)
 *
 * See whether the given time interval has passed since the given baseline
 * time.  If so, it also updates the baseline to the current time for the
 * next check.
 *
 * Return: true if the time interval has passed, or false otherwise.
 */
static bool damon_check_reset_time_interval(struct timespec64 *baseline,
		unsigned long interval)
{
	struct timespec64 now;

	ktime_get_coarse_ts64(&now);
	if ((timespec64_to_ns(&now) - timespec64_to_ns(baseline)) <
			interval * 1000)
		return false;
	*baseline = now;
	return true;
}
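
/*
 * Note the unit conversion above: @interval is in microseconds while the
 * timespec64 delta is in nanoseconds, hence the multiplication by 1000.
 * For example, an @interval of 100000 (100 ms) passes once the delta
 * reaches 100000000 ns.
 */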

/*
 * Check whether it is time to flush the aggregated information
 */
static bool kdamond_aggregate_interval_passed(struct damon_ctx *ctx)
{
	return damon_check_reset_time_interval(&ctx->last_aggregation,
			ctx->aggr_interval);
}

/*
 * Reset the aggregated monitoring results ('nr_accesses' of each region).
 */
static void kdamond_reset_aggregated(struct damon_ctx *c)
{
	struct damon_target *t;

	damon_for_each_target(t, c) {
		struct damon_region *r;

		damon_for_each_region(r, t) {
			trace_damon_aggregated(t, r, damon_nr_regions(t));
			r->nr_accesses = 0;
		}
	}
}

#define sz_damon_region(r) (r->ar.end - r->ar.start)

/*
 * Merge two adjacent regions into one region
 */
static void damon_merge_two_regions(struct damon_target *t,
		struct damon_region *l, struct damon_region *r)
{
	unsigned long sz_l = sz_damon_region(l), sz_r = sz_damon_region(r);

	l->nr_accesses = (l->nr_accesses * sz_l + r->nr_accesses * sz_r) /
			(sz_l + sz_r);
	l->ar.end = r->ar.end;
	damon_destroy_region(r, t);
}
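
/*
 * The merged 'nr_accesses' is a size-weighted average.  For example,
 * merging a 30 MiB region with nr_accesses of 10 and an adjacent 10 MiB
 * region with nr_accesses of 2 yields (10 * 30 + 2 * 10) / (30 + 10) = 8.
 */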

#define diff_of(a, b) (a > b ? a - b : b - a)

/*
 * Merge adjacent regions having similar access frequencies
 *
 * t		target affected by this merge operation
 * thres	'->nr_accesses' diff threshold for the merge
 * sz_limit	size upper limit of each region
 */
static void damon_merge_regions_of(struct damon_target *t, unsigned int thres,
				   unsigned long sz_limit)
{
	struct damon_region *r, *prev = NULL, *next;

	damon_for_each_region_safe(r, next, t) {
		if (prev && prev->ar.end == r->ar.start &&
		    diff_of(prev->nr_accesses, r->nr_accesses) <= thres &&
		    sz_damon_region(prev) + sz_damon_region(r) <= sz_limit)
			damon_merge_two_regions(t, prev, r);
		else
			prev = r;
	}
}

/*
 * Merge adjacent regions having similar access frequencies
 *
 * threshold	'->nr_accesses' diff threshold for the merge
 * sz_limit	size upper limit of each region
 *
 * This function merges monitoring target regions which are adjacent and
 * whose access frequencies are similar.  This is for minimizing the
 * monitoring overhead under a dynamically changing access pattern.  If a
 * merge was unnecessarily made, later 'kdamond_split_regions()' will revert
 * it.
 */
static void kdamond_merge_regions(struct damon_ctx *c, unsigned int threshold,
				  unsigned long sz_limit)
{
	struct damon_target *t;

	damon_for_each_target(t, c)
		damon_merge_regions_of(t, threshold, sz_limit);
}

/*
 * Split a region in two
 *
 * r		the region to be split
 * sz_r		size of the first sub-region that will be made
 */
static void damon_split_region_at(struct damon_ctx *ctx,
		struct damon_target *t, struct damon_region *r,
		unsigned long sz_r)
{
	struct damon_region *new;

	new = damon_new_region(r->ar.start + sz_r, r->ar.end);
	if (!new)
		return;

	r->ar.end = new->ar.start;

	damon_insert_region(new, r, damon_next_region(r), t);
}

/* Split every region in the given target into 'nr_subs' regions */
static void damon_split_regions_of(struct damon_ctx *ctx,
				   struct damon_target *t, int nr_subs)
{
	struct damon_region *r, *next;
	unsigned long sz_region, sz_sub = 0;
	int i;

	damon_for_each_region_safe(r, next, t) {
		sz_region = r->ar.end - r->ar.start;

		for (i = 0; i < nr_subs - 1 &&
				sz_region > 2 * DAMON_MIN_REGION; i++) {
			/*
			 * Randomly select size of left sub-region to be at
			 * least 10% and at most 90% of the original region
			 */
			sz_sub = ALIGN_DOWN(damon_rand(1, 10) *
					sz_region / 10, DAMON_MIN_REGION);
			/* Do not allow blank region */
			if (sz_sub == 0 || sz_sub >= sz_region)
				continue;

			damon_split_region_at(ctx, t, r, sz_sub);
			sz_region = sz_sub;
		}
	}
}
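
/*
 * For example, splitting a 100 MiB region when damon_rand(1, 10) returns 3
 * makes a 30 MiB left sub-region (aligned down to DAMON_MIN_REGION); with
 * nr_subs of 3, the next iteration splits within that 30 MiB part again.
 */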

/*
 * Split every target region into randomly-sized small regions
 *
 * This function splits every target region into random-sized small regions
 * if the current total number of the regions is equal to or smaller than
 * half of the user-specified maximum number of regions.  This is for
 * maximizing the monitoring accuracy under a dynamically changing access
 * pattern.  If a split was unnecessarily made, later
 * 'kdamond_merge_regions()' will revert it.
 */
static void kdamond_split_regions(struct damon_ctx *ctx)
{
	struct damon_target *t;
	unsigned int nr_regions = 0;
	static unsigned int last_nr_regions;
	int nr_subregions = 2;

	damon_for_each_target(t, ctx)
		nr_regions += damon_nr_regions(t);

	if (nr_regions > ctx->max_nr_regions / 2)
		return;

	/* Maybe the middle of the region has different access frequency */
	if (last_nr_regions == nr_regions &&
			nr_regions < ctx->max_nr_regions / 3)
		nr_subregions = 3;

	damon_for_each_target(t, ctx)
		damon_split_regions_of(ctx, t, nr_subregions);

	last_nr_regions = nr_regions;
}
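
/*
 * With the default max_nr_regions of 1000, for example, splitting happens
 * only while the total region count is at most 500, and three-way splits
 * kick in once the count stalls unchanged below 333 across aggregation
 * intervals.
 */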

/*
 * Check whether it is time to update the monitoring primitives
 *
 * Returns true if it is.
 */
static bool kdamond_need_update_primitive(struct damon_ctx *ctx)
{
	return damon_check_reset_time_interval(&ctx->last_primitive_update,
			ctx->primitive_update_interval);
}

/*
 * Check whether current monitoring should be stopped
 *
 * The monitoring is stopped when either the user requested to stop, or all
 * monitoring targets are invalid.
 *
 * Returns true if the monitoring should be stopped.
 */
static bool kdamond_need_stop(struct damon_ctx *ctx)
{
	struct damon_target *t;
	bool stop;

	mutex_lock(&ctx->kdamond_lock);
	stop = ctx->kdamond_stop;
	mutex_unlock(&ctx->kdamond_lock);
	if (stop)
		return true;

	if (!ctx->primitive.target_valid)
		return false;

	damon_for_each_target(t, ctx) {
		if (ctx->primitive.target_valid(t))
			return false;
	}

	return true;
}

static void set_kdamond_stop(struct damon_ctx *ctx)
{
	mutex_lock(&ctx->kdamond_lock);
	ctx->kdamond_stop = true;
	mutex_unlock(&ctx->kdamond_lock);
}

/*
 * The monitoring daemon that runs as a kernel thread
 */
static int kdamond_fn(void *data)
{
	struct damon_ctx *ctx = (struct damon_ctx *)data;
	struct damon_target *t;
	struct damon_region *r, *next;
	unsigned int max_nr_accesses = 0;
	unsigned long sz_limit = 0;

	mutex_lock(&ctx->kdamond_lock);
	pr_info("kdamond (%d) starts\n", ctx->kdamond->pid);
	mutex_unlock(&ctx->kdamond_lock);

	if (ctx->primitive.init)
		ctx->primitive.init(ctx);
	if (ctx->callback.before_start && ctx->callback.before_start(ctx))
		set_kdamond_stop(ctx);

	sz_limit = damon_region_sz_limit(ctx);

	while (!kdamond_need_stop(ctx)) {
		if (ctx->primitive.prepare_access_checks)
			ctx->primitive.prepare_access_checks(ctx);
		if (ctx->callback.after_sampling &&
				ctx->callback.after_sampling(ctx))
			set_kdamond_stop(ctx);

		usleep_range(ctx->sample_interval, ctx->sample_interval + 1);

		if (ctx->primitive.check_accesses)
			max_nr_accesses = ctx->primitive.check_accesses(ctx);

		if (kdamond_aggregate_interval_passed(ctx)) {
			kdamond_merge_regions(ctx,
					max_nr_accesses / 10,
					sz_limit);
			if (ctx->callback.after_aggregation &&
					ctx->callback.after_aggregation(ctx))
				set_kdamond_stop(ctx);
			kdamond_reset_aggregated(ctx);
			kdamond_split_regions(ctx);
			if (ctx->primitive.reset_aggregated)
				ctx->primitive.reset_aggregated(ctx);
		}

		if (kdamond_need_update_primitive(ctx)) {
			if (ctx->primitive.update)
				ctx->primitive.update(ctx);
			sz_limit = damon_region_sz_limit(ctx);
		}
	}
	damon_for_each_target(t, ctx) {
		damon_for_each_region_safe(r, next, t)
			damon_destroy_region(r, t);
	}

	if (ctx->callback.before_terminate &&
			ctx->callback.before_terminate(ctx))
		set_kdamond_stop(ctx);
	if (ctx->primitive.cleanup)
		ctx->primitive.cleanup(ctx);

	pr_debug("kdamond (%d) finishes\n", ctx->kdamond->pid);
	mutex_lock(&ctx->kdamond_lock);
	ctx->kdamond = NULL;
	mutex_unlock(&ctx->kdamond_lock);

	mutex_lock(&damon_lock);
	nr_running_ctxs--;
	mutex_unlock(&damon_lock);

	do_exit(0);
}
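
/*
 * To summarize the main loop above under the default attributes: kdamond
 * checks accesses every 5 ms, merges and then re-splits regions at each
 * 100 ms aggregation interval (using a merge threshold of a tenth of the
 * highest observed nr_accesses), and lets the primitive update its
 * monitoring targets every 60 s.
 */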

#include "core-test.h"