// SPDX-License-Identifier: GPL-2.0
/*
 * DAMON Primitives for Virtual Address Spaces
 */

#define pr_fmt(fmt) "damon-va: " fmt

#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/mman.h>
#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
#include <linux/pagewalk.h>
#include <linux/sched/mm.h>

#include "ops-common.h"

#ifdef CONFIG_DAMON_VADDR_KUNIT_TEST
#undef DAMON_MIN_REGION
#define DAMON_MIN_REGION 1
#endif

/*
 * 't->pid' should be the pointer to the relevant 'struct pid' having reference
 * count.  Caller must put the returned task, unless it is NULL.
 */
static inline struct task_struct *damon_get_task_struct(struct damon_target *t)
{
        return get_pid_task(t->pid, PIDTYPE_PID);
}

/*
 * Get the mm_struct of the given target
 *
 * Caller _must_ put the mm_struct after use, unless it is NULL.
 *
 * Returns the mm_struct of the target on success, NULL on failure
 */
static struct mm_struct *damon_get_mm(struct damon_target *t)
{
        struct task_struct *task;
        struct mm_struct *mm;

        task = damon_get_task_struct(t);
        if (!task)
                return NULL;

        mm = get_task_mm(task);
        put_task_struct(task);
        return mm;
}

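/*
 * Example pairing for damon_get_mm() (illustrative only): callers are
 * expected to drop the returned reference with mmput() once done, e.g.
 *
 *      mm = damon_get_mm(t);
 *      if (!mm)
 *              return -EINVAL;
 *      ...use mm...
 *      mmput(mm);
 */
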
/*
 * Functions for the initial monitoring target regions construction
 */

/*
 * Size-evenly split a region into 'nr_pieces' small regions
 *
 * Returns 0 on success, or negative error code otherwise.
 */
static int damon_va_evenly_split_region(struct damon_target *t,
                struct damon_region *r, unsigned int nr_pieces)
{
        unsigned long sz_orig, sz_piece, orig_end;
        struct damon_region *n = NULL, *next;
        unsigned long start;

        if (!r || !nr_pieces)
                return -EINVAL;

        orig_end = r->ar.end;
        sz_orig = damon_sz_region(r);
        sz_piece = ALIGN_DOWN(sz_orig / nr_pieces, DAMON_MIN_REGION);
        if (!sz_piece)
                return -EINVAL;

        r->ar.end = r->ar.start + sz_piece;
        next = damon_next_region(r);
        for (start = r->ar.end; start + sz_piece <= orig_end;
                        start += sz_piece) {
                n = damon_new_region(start, start + sz_piece);
                if (!n)
                        return -ENOMEM;
                damon_insert_region(n, r, next, t);
                r = n;
        }
        /* complement last region for possible rounding error */
        if (n)
                n->ar.end = orig_end;

        return 0;
}

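/*
 * Worked example of the split above (illustrative numbers only): splitting a
 * [0, 10 MiB) region into nr_pieces == 3 with a 4 KiB DAMON_MIN_REGION gives
 * sz_piece == ALIGN_DOWN(10 MiB / 3, 4 KiB) == 3412 KiB.  Two new regions of
 * that size are inserted after the first one, and the last region absorbs the
 * rounding remainder so the pieces still cover exactly [0, 10 MiB).
 */
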
static unsigned long sz_range(struct damon_addr_range *r)
{
        return r->end - r->start;
}

/*
 * Find three regions separated by two biggest unmapped regions
 *
 * mm           the mm_struct of the target address space
 * regions      an array of three address ranges that results will be saved
 *
 * This function receives an address space and finds three regions in it which
 * are separated by the two biggest unmapped regions in the space.  Please
 * refer to the comments of the '__damon_va_init_regions()' function below to
 * know why this is necessary.
 *
 * Returns 0 if success, or negative error code otherwise.
 */
static int __damon_va_three_regions(struct mm_struct *mm,
                                    struct damon_addr_range regions[3])
{
        struct damon_addr_range first_gap = {0}, second_gap = {0};
        VMA_ITERATOR(vmi, mm, 0);
        struct vm_area_struct *vma, *prev = NULL;
        unsigned long start;

        /*
         * Find the two biggest gaps so that first_gap > second_gap > others.
         * If this is too slow, it can be optimised to examine the maple
         * tree gaps.
         */
        for_each_vma(vmi, vma) {
                unsigned long gap;

                if (!prev) {
                        start = vma->vm_start;
                        goto next;
                }
                gap = vma->vm_start - prev->vm_end;

                if (gap > sz_range(&first_gap)) {
                        second_gap = first_gap;
                        first_gap.start = prev->vm_end;
                        first_gap.end = vma->vm_start;
                } else if (gap > sz_range(&second_gap)) {
                        second_gap.start = prev->vm_end;
                        second_gap.end = vma->vm_start;
                }
next:
                prev = vma;
        }

        if (!sz_range(&second_gap) || !sz_range(&first_gap))
                return -EINVAL;

        /* Sort the two biggest gaps by address */
        if (first_gap.start > second_gap.start)
                swap(first_gap, second_gap);

        /* Store the result */
        regions[0].start = ALIGN(start, DAMON_MIN_REGION);
        regions[0].end = ALIGN(first_gap.start, DAMON_MIN_REGION);
        regions[1].start = ALIGN(first_gap.end, DAMON_MIN_REGION);
        regions[1].end = ALIGN(second_gap.start, DAMON_MIN_REGION);
        regions[2].start = ALIGN(second_gap.end, DAMON_MIN_REGION);
        regions[2].end = ALIGN(prev->vm_end, DAMON_MIN_REGION);

        return 0;
}

/*
 * Get the three regions in the given target (task)
 *
 * Returns 0 on success, negative error code otherwise.
 */
static int damon_va_three_regions(struct damon_target *t,
                                  struct damon_addr_range regions[3])
{
        struct mm_struct *mm;
        int rc;

        mm = damon_get_mm(t);
        if (!mm)
                return -EINVAL;
        mmap_read_lock(mm);
        rc = __damon_va_three_regions(mm, regions);
        mmap_read_unlock(mm);
        mmput(mm);
        return rc;
}

/*
 * Initialize the monitoring target regions for the given target (task)
 *
 * t    the given target
 *
 * Because only a number of small portions of the entire address space
 * are actually mapped to the memory and accessed, monitoring the unmapped
 * regions is wasteful.  That said, because we can deal with small noises,
 * tracking every mapping is not strictly required but could even incur a high
 * overhead if the mapping frequently changes or the number of mappings is
 * high.  The adaptive regions adjustment mechanism will further help to deal
 * with the noise by simply identifying the unmapped areas as a region that
 * has no access.  Moreover, applying the real mappings that would have many
 * unmapped areas inside will make the adaptive mechanism quite complex.  That
 * said, too huge unmapped areas inside the monitoring target should be
 * removed to not take the time for the adaptive mechanism.
 *
 * For this reason, we convert the complex mappings to three distinct regions
 * that cover every mapped area of the address space.  Also, the two gaps
 * between the three regions are the two biggest unmapped areas in the given
 * address space.  In detail, this function first identifies the start and the
 * end of the mappings and the two biggest unmapped areas of the address
 * space.  Then, it constructs the three regions as below:
 *
 *     [mappings[0]->start, big_two_unmapped_areas[0]->start)
 *     [big_two_unmapped_areas[0]->end, big_two_unmapped_areas[1]->start)
 *     [big_two_unmapped_areas[1]->end, mappings[nr_mappings - 1]->end)
 *
 * As the usual memory map of processes is as below, the gap between the heap
 * and the uppermost mmap()-ed region, and the gap between the lowermost
 * mmap()-ed region and the stack will be the two biggest unmapped regions.
 * Because these gaps are exceptionally huge areas in usual address space,
 * excluding these two biggest unmapped regions will be sufficient to make a
 * trade-off.
 *
 *   <heap>
 *   <BIG UNMAPPED REGION 1>
 *   <uppermost mmap()-ed region>
 *   (other mmap()-ed regions and small unmapped regions)
 *   <lowermost mmap()-ed region>
 *   <BIG UNMAPPED REGION 2>
 *   <stack>
 */
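/*
 * A hypothetical worked example of the conversion above (addresses are
 * illustrative only): if the mapped areas are the heap at [0x1000, 0x3000),
 * mmap()-ed areas around [0x500000, 0x600000), and the stack at
 * [0x7ff000000000, 0x7ff000100000), the two biggest gaps are
 * (0x3000, 0x500000) and (0x600000, 0x7ff000000000).  The resulting three
 * regions are then roughly [0x1000, 0x3000), [0x500000, 0x600000), and
 * [0x7ff000000000, 0x7ff000100000), each boundary aligned to
 * DAMON_MIN_REGION.
 */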
static void __damon_va_init_regions(struct damon_ctx *ctx,
                                    struct damon_target *t)
{
        struct damon_target *ti;
        struct damon_region *r;
        struct damon_addr_range regions[3];
        unsigned long sz = 0, nr_pieces;
        int i, tidx = 0;

        if (damon_va_three_regions(t, regions)) {
                damon_for_each_target(ti, ctx) {
                        if (ti == t)
                                break;
                        tidx++;
                }
                pr_debug("Failed to get three regions of %dth target\n", tidx);
                return;
        }

        for (i = 0; i < 3; i++)
                sz += regions[i].end - regions[i].start;
        if (ctx->attrs.min_nr_regions)
                sz /= ctx->attrs.min_nr_regions;
        if (sz < DAMON_MIN_REGION)
                sz = DAMON_MIN_REGION;

        /* Set the initial three regions of the target */
        for (i = 0; i < 3; i++) {
                r = damon_new_region(regions[i].start, regions[i].end);
                if (!r) {
                        pr_err("%d'th init region creation failed\n", i);
                        return;
                }
                damon_add_region(r, t);

                nr_pieces = (regions[i].end - regions[i].start) / sz;
                damon_va_evenly_split_region(t, r, nr_pieces);
        }
}

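/*
 * Worked example of the sizing arithmetic above (illustrative numbers only):
 * with the three regions totalling 300 MiB and 'min_nr_regions' of 10, 'sz'
 * becomes 30 MiB.  A 120 MiB region is then split into 120 / 30 == 4 pieces,
 * so the target starts with roughly 'min_nr_regions' similarly sized regions
 * covering the mapped areas.
 */
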
/* Initialize '->regions_list' of every target (task) */
static void damon_va_init(struct damon_ctx *ctx)
{
        struct damon_target *t;

        damon_for_each_target(t, ctx) {
                /* the user may set the target regions as they want */
                if (!damon_nr_regions(t))
                        __damon_va_init_regions(ctx, t);
        }
}

/*
 * Update regions for current memory mappings
 */
static void damon_va_update(struct damon_ctx *ctx)
{
        struct damon_addr_range three_regions[3];
        struct damon_target *t;

        damon_for_each_target(t, ctx) {
                if (damon_va_three_regions(t, three_regions))
                        continue;
                damon_set_regions(t, three_regions, 3);
        }
}

static int damon_mkold_pmd_entry(pmd_t *pmd, unsigned long addr,
                unsigned long next, struct mm_walk *walk)
{
        pte_t *pte;
        pmd_t pmde;
        spinlock_t *ptl;

        if (pmd_trans_huge(pmdp_get(pmd))) {
                ptl = pmd_lock(walk->mm, pmd);
                pmde = pmdp_get(pmd);
                if (!pmd_present(pmde)) {
                        spin_unlock(ptl);
                        return 0;
                }
                if (pmd_trans_huge(pmde)) {
                        damon_pmdp_mkold(pmd, walk->vma, addr);
                        spin_unlock(ptl);
                        return 0;
                }
                spin_unlock(ptl);
        }

        pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
        if (!pte) {
                walk->action = ACTION_AGAIN;
                return 0;
        }
        if (!pte_present(ptep_get(pte)))
                goto out;
        damon_ptep_mkold(pte, walk->vma, addr);
out:
        pte_unmap_unlock(pte, ptl);
        return 0;
}

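/*
 * Note (summary of the aging done above, assuming the ops-common helpers):
 * only the accessed-bit state of the sampled PTE or PMD is cleared, via
 * damon_ptep_mkold()/damon_pmdp_mkold(), which also mark the folio idle and
 * clear the young state of secondary MMUs through the mmu_notifier, so that
 * the next check can tell whether the address was touched in between.
 */
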
#ifdef CONFIG_HUGETLB_PAGE
static void damon_hugetlb_mkold(pte_t *pte, struct mm_struct *mm,
                                struct vm_area_struct *vma, unsigned long addr)
{
        bool referenced = false;
        pte_t entry = huge_ptep_get(mm, addr, pte);
        struct folio *folio = pfn_folio(pte_pfn(entry));
        unsigned long psize = huge_page_size(hstate_vma(vma));

        folio_get(folio);

        if (pte_young(entry)) {
                referenced = true;
                entry = pte_mkold(entry);
                set_huge_pte_at(mm, addr, pte, entry, psize);
        }

#ifdef CONFIG_MMU_NOTIFIER
        if (mmu_notifier_clear_young(mm, addr,
                                addr + huge_page_size(hstate_vma(vma))))
                referenced = true;
#endif /* CONFIG_MMU_NOTIFIER */

        if (referenced)
                folio_set_young(folio);

        folio_set_idle(folio);
        folio_put(folio);
}

static int damon_mkold_hugetlb_entry(pte_t *pte, unsigned long hmask,
                                     unsigned long addr, unsigned long end,
                                     struct mm_walk *walk)
{
        struct hstate *h = hstate_vma(walk->vma);
        spinlock_t *ptl;
        pte_t entry;

        ptl = huge_pte_lock(h, walk->mm, pte);
        entry = huge_ptep_get(walk->mm, addr, pte);
        if (!pte_present(entry))
                goto out;

        damon_hugetlb_mkold(pte, walk->mm, walk->vma, addr);
out:
        spin_unlock(ptl);
        return 0;
}
#else
#define damon_mkold_hugetlb_entry NULL
#endif /* CONFIG_HUGETLB_PAGE */

static const struct mm_walk_ops damon_mkold_ops = {
        .pmd_entry = damon_mkold_pmd_entry,
        .hugetlb_entry = damon_mkold_hugetlb_entry,
        .walk_lock = PGWALK_RDLOCK,
};

static void damon_va_mkold(struct mm_struct *mm, unsigned long addr)
{
        mmap_read_lock(mm);
        walk_page_range(mm, addr, addr + 1, &damon_mkold_ops, NULL);
        mmap_read_unlock(mm);
}

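/*
 * Note on the [addr, addr + 1) range above: only the single sampling address
 * needs to be aged, so the walk visits exactly one PTE (or one PMD / hugetlb
 * entry) and clears its young state using the entry callbacks above.
 */
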
/*
 * Functions for the access checking of the regions
 */

static void __damon_va_prepare_access_check(struct mm_struct *mm,
                                            struct damon_region *r)
{
        r->sampling_addr = damon_rand(r->ar.start, r->ar.end);

        damon_va_mkold(mm, r->sampling_addr);
}

static void damon_va_prepare_access_checks(struct damon_ctx *ctx)
{
        struct damon_target *t;
        struct mm_struct *mm;
        struct damon_region *r;

        damon_for_each_target(t, ctx) {
                mm = damon_get_mm(t);
                if (!mm)
                        continue;
                damon_for_each_region(r, t)
                        __damon_va_prepare_access_check(mm, r);
                mmput(mm);
        }
}

struct damon_young_walk_private {
        /* size of the folio for the access checked virtual memory address */
        unsigned long *folio_sz;
        bool young;
};

static int damon_young_pmd_entry(pmd_t *pmd, unsigned long addr,
                unsigned long next, struct mm_walk *walk)
{
        pte_t *pte;
        pte_t ptent;
        spinlock_t *ptl;
        struct folio *folio;
        struct damon_young_walk_private *priv = walk->private;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
        if (pmd_trans_huge(pmdp_get(pmd))) {
                pmd_t pmde;

                ptl = pmd_lock(walk->mm, pmd);
                pmde = pmdp_get(pmd);
                if (!pmd_present(pmde)) {
                        spin_unlock(ptl);
                        return 0;
                }
                if (!pmd_trans_huge(pmde)) {
                        spin_unlock(ptl);
                        goto regular_page;
                }
                folio = damon_get_folio(pmd_pfn(pmde));
                if (!folio)
                        goto huge_out;
                if (pmd_young(pmde) || !folio_test_idle(folio) ||
                                mmu_notifier_test_young(walk->mm, addr))
                        priv->young = true;
                *priv->folio_sz = HPAGE_PMD_SIZE;
                folio_put(folio);
huge_out:
                spin_unlock(ptl);
                return 0;
        }

regular_page:
#endif  /* CONFIG_TRANSPARENT_HUGEPAGE */

        pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
        if (!pte) {
                walk->action = ACTION_AGAIN;
                return 0;
        }
        ptent = ptep_get(pte);
        if (!pte_present(ptent))
                goto out;
        folio = damon_get_folio(pte_pfn(ptent));
        if (!folio)
                goto out;
        if (pte_young(ptent) || !folio_test_idle(folio) ||
                        mmu_notifier_test_young(walk->mm, addr))
                priv->young = true;
        *priv->folio_sz = folio_size(folio);
        folio_put(folio);
out:
        pte_unmap_unlock(pte, ptl);
        return 0;
}

#ifdef CONFIG_HUGETLB_PAGE
static int damon_young_hugetlb_entry(pte_t *pte, unsigned long hmask,
                                     unsigned long addr, unsigned long end,
                                     struct mm_walk *walk)
{
        struct damon_young_walk_private *priv = walk->private;
        struct hstate *h = hstate_vma(walk->vma);
        struct folio *folio;
        spinlock_t *ptl;
        pte_t entry;

        ptl = huge_pte_lock(h, walk->mm, pte);
        entry = huge_ptep_get(walk->mm, addr, pte);
        if (!pte_present(entry))
                goto out;

        folio = pfn_folio(pte_pfn(entry));
        folio_get(folio);
        if (pte_young(entry) || !folio_test_idle(folio) ||
                        mmu_notifier_test_young(walk->mm, addr))
                priv->young = true;
        *priv->folio_sz = huge_page_size(h);
        folio_put(folio);
out:
        spin_unlock(ptl);
        return 0;
}
#else
#define damon_young_hugetlb_entry NULL
#endif /* CONFIG_HUGETLB_PAGE */

static const struct mm_walk_ops damon_young_ops = {
        .pmd_entry = damon_young_pmd_entry,
        .hugetlb_entry = damon_young_hugetlb_entry,
        .walk_lock = PGWALK_RDLOCK,
};

static bool damon_va_young(struct mm_struct *mm, unsigned long addr,
                unsigned long *folio_sz)
{
        struct damon_young_walk_private arg = {
                .folio_sz = folio_sz,
                .young = false,
        };

        mmap_read_lock(mm);
        walk_page_range(mm, addr, addr + 1, &damon_young_ops, &arg);
        mmap_read_unlock(mm);
        return arg.young;
}

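/*
 * Example of the intended use of damon_va_young() (illustrative only):
 *
 *      unsigned long folio_sz;
 *      bool accessed = damon_va_young(mm, r->sampling_addr, &folio_sz);
 *
 * 'accessed' tells whether the sampled address was touched since the last
 * damon_va_mkold(), and 'folio_sz' lets the caller reuse that result for
 * other sampling addresses falling in the same folio.
 */
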
/*
 * Check whether the region was accessed after the last preparation
 *
 * mm   'mm_struct' for the given virtual address space
 * r    the region to be checked
 */
static void __damon_va_check_access(struct mm_struct *mm,
                                struct damon_region *r, bool same_target,
                                struct damon_attrs *attrs)
{
        static unsigned long last_addr;
        static unsigned long last_folio_sz = PAGE_SIZE;
        static bool last_accessed;

        if (!mm) {
                damon_update_region_access_rate(r, false, attrs);
                return;
        }

        /* If the region is in the last checked page, reuse the result */
        if (same_target && (ALIGN_DOWN(last_addr, last_folio_sz) ==
                        ALIGN_DOWN(r->sampling_addr, last_folio_sz))) {
                damon_update_region_access_rate(r, last_accessed, attrs);
                return;
        }

        last_accessed = damon_va_young(mm, r->sampling_addr, &last_folio_sz);
        damon_update_region_access_rate(r, last_accessed, attrs);

        last_addr = r->sampling_addr;
}

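/*
 * Worked example of the result-reuse check above (illustrative numbers): if
 * the last sampled address was 0x201800 within a 2 MiB folio (last_folio_sz
 * == 0x200000), a new sampling address of 0x3ff000 aligns down to the same
 * 0x200000, so the region reuses 'last_accessed' instead of triggering
 * another page table walk.
 */
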
static unsigned int damon_va_check_accesses(struct damon_ctx *ctx)
{
        struct damon_target *t;
        struct mm_struct *mm;
        struct damon_region *r;
        unsigned int max_nr_accesses = 0;
        bool same_target;

        damon_for_each_target(t, ctx) {
                mm = damon_get_mm(t);
                same_target = false;
                damon_for_each_region(r, t) {
                        __damon_va_check_access(mm, r, same_target,
                                        &ctx->attrs);
                        max_nr_accesses = max(r->nr_accesses, max_nr_accesses);
                        same_target = true;
                }
                if (mm)
                        mmput(mm);
        }

        return max_nr_accesses;
}

/*
 * Functions for the target validity check and cleanup
 */
static bool damon_va_target_valid(struct damon_target *t)
{
        struct task_struct *task;

        task = damon_get_task_struct(t);
        if (task) {
                put_task_struct(task);
                return true;
        }
        return false;
}

#ifndef CONFIG_ADVISE_SYSCALLS
static unsigned long damos_madvise(struct damon_target *target,
                struct damon_region *r, int behavior)
{
        return 0;
}
#else
static unsigned long damos_madvise(struct damon_target *target,
                struct damon_region *r, int behavior)
{
        struct mm_struct *mm;
        unsigned long start = PAGE_ALIGN(r->ar.start);
        unsigned long len = PAGE_ALIGN(damon_sz_region(r));
        unsigned long applied;

        mm = damon_get_mm(target);
        if (!mm)
                return 0;

        applied = do_madvise(mm, start, len, behavior) ? 0 : len;
        mmput(mm);

        return applied;
}
#endif  /* CONFIG_ADVISE_SYSCALLS */

static unsigned long damon_va_apply_scheme(struct damon_ctx *ctx,
                struct damon_target *t, struct damon_region *r,
                struct damos *scheme)
{
        int madv_action;

        switch (scheme->action) {
        case DAMOS_WILLNEED:
                madv_action = MADV_WILLNEED;
                break;
        case DAMOS_COLD:
                madv_action = MADV_COLD;
                break;
        case DAMOS_PAGEOUT:
                madv_action = MADV_PAGEOUT;
                break;
        case DAMOS_HUGEPAGE:
                madv_action = MADV_HUGEPAGE;
                break;
        case DAMOS_NOHUGEPAGE:
                madv_action = MADV_NOHUGEPAGE;
                break;
        case DAMOS_STAT:
                return 0;
        default:
                /*
                 * DAMOS actions that are not yet supported by 'vaddr'.
                 */
                return 0;
        }

        return damos_madvise(t, r, madv_action);
}

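/*
 * For example, a scheme with DAMOS_PAGEOUT applied to a region ends up as
 * do_madvise(mm, start, len, MADV_PAGEOUT) on the page-aligned region range,
 * and the number of bytes successfully madvise()-d is returned as the amount
 * of the applied scheme.
 */
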
static int damon_va_scheme_score(struct damon_ctx *context,
                struct damon_target *t, struct damon_region *r,
                struct damos *scheme)
{
        switch (scheme->action) {
        case DAMOS_PAGEOUT:
                return damon_cold_score(context, r, scheme);
        default:
                break;
        }

        return DAMOS_MAX_SCORE;
}

static int __init damon_va_initcall(void)
{
        struct damon_operations ops = {
                .id = DAMON_OPS_VADDR,
                .init = damon_va_init,
                .update = damon_va_update,
                .prepare_access_checks = damon_va_prepare_access_checks,
                .check_accesses = damon_va_check_accesses,
                .reset_aggregated = NULL,
                .target_valid = damon_va_target_valid,
                .cleanup = NULL,
                .apply_scheme = damon_va_apply_scheme,
                .get_scheme_score = damon_va_scheme_score,
        };
        /* ops for fixed virtual address ranges */
        struct damon_operations ops_fvaddr = ops;
        int err;

        /* Don't set the monitoring target regions for the entire mapping */
        ops_fvaddr.id = DAMON_OPS_FVADDR;
        ops_fvaddr.init = NULL;
        ops_fvaddr.update = NULL;

        err = damon_register_ops(&ops);
        if (err)
                return err;
        return damon_register_ops(&ops_fvaddr);
}

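/*
 * With both operation sets registered, user space can choose "vaddr"
 * (automatically initialized and updated target regions) or "fvaddr"
 * (fixed, user-specified virtual address ranges) for a monitoring context,
 * e.g. via DAMON's sysfs interface, assuming that interface is enabled.
 */
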
subsys_initcall(damon_va_initcall);

#include "tests/vaddr-kunit.h"