// SPDX-License-Identifier: GPL-2.0
/*
 * DAMON Primitives for The Physical Address Space
 */

#define pr_fmt(fmt) "damon-pa: " fmt

#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>

#include "../internal.h"
#include "ops-common.h"
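/*
 * rmap_walk() callback for damon_pa_mkold(): ages every PTE or PMD that maps
 * the folio by clearing its accessed bit via the damon_*_mkold() helpers.
 */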
static bool __damon_pa_mkold(struct folio *folio, struct vm_area_struct *vma,
		unsigned long addr, void *arg)
{
	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);

	while (page_vma_mapped_walk(&pvmw)) {
		addr = pvmw.address;
		if (pvmw.pte)
			damon_ptep_mkold(pvmw.pte, vma->vm_mm, addr);
		else
			damon_pmdp_mkold(pvmw.pmd, vma->vm_mm, addr);
	}
	return true;
}
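/*
 * Mark the page at @paddr as old.  Unmapped folios are only flagged idle;
 * mapped ones get their accessed bits cleared through a reverse map walk.
 */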
static void damon_pa_mkold(unsigned long paddr)
{
	struct folio *folio;
	struct page *page = damon_get_page(PHYS_PFN(paddr));
	struct rmap_walk_control rwc = {
		.rmap_one = __damon_pa_mkold,
		.anon_lock = folio_lock_anon_vma_read,
	};
	bool need_lock;

	if (!page)
		return;
	folio = page_folio(page);

	if (!folio_mapped(folio) || !folio_raw_mapping(folio)) {
		folio_set_idle(folio);
		goto out;
	}

	need_lock = !folio_test_anon(folio) || folio_test_ksm(folio);
	if (need_lock && !folio_trylock(folio))
		goto out;

	rmap_walk(folio, &rwc);

	if (need_lock)
		folio_unlock(folio);

out:
	folio_put(folio);
}
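/*
 * Prepare the next access check: pick a random sampling address in each
 * region and age the page behind it with damon_pa_mkold().
 */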
static void __damon_pa_prepare_access_check(struct damon_ctx *ctx,
					    struct damon_region *r)
{
	r->sampling_addr = damon_rand(r->ar.start, r->ar.end);

	damon_pa_mkold(r->sampling_addr);
}

static void damon_pa_prepare_access_checks(struct damon_ctx *ctx)
{
	struct damon_target *t;
	struct damon_region *r;

	damon_for_each_target(t, ctx) {
		damon_for_each_region(r, t)
			__damon_pa_prepare_access_check(ctx, r);
	}
}
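/* Result of a single access check: was the page accessed, and how large is it */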
struct damon_pa_access_chk_result {
	unsigned long page_sz;
	bool accessed;
};
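/*
 * rmap_walk() callback for damon_pa_young(): the folio counts as accessed if
 * any mapping PTE/PMD is young, the folio is no longer idle, or an MMU
 * notifier reports a young secondary mapping.  The walk stops at the first
 * positive finding.
 */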
static bool __damon_pa_young(struct folio *folio, struct vm_area_struct *vma,
		unsigned long addr, void *arg)
{
	struct damon_pa_access_chk_result *result = arg;
	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);

	result->accessed = false;
	result->page_sz = PAGE_SIZE;
	while (page_vma_mapped_walk(&pvmw)) {
		addr = pvmw.address;
		if (pvmw.pte) {
			result->accessed = pte_young(*pvmw.pte) ||
				!folio_test_idle(folio) ||
				mmu_notifier_test_young(vma->vm_mm, addr);
		} else {
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
			result->accessed = pmd_young(*pvmw.pmd) ||
				!folio_test_idle(folio) ||
				mmu_notifier_test_young(vma->vm_mm, addr);
			result->page_sz = HPAGE_PMD_SIZE;
#else
			WARN_ON_ONCE(1);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
		}
		if (result->accessed) {
			page_vma_mapped_walk_done(&pvmw);
			break;
		}
	}

	/* If accessed, stop walking */
	return !result->accessed;
}
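/*
 * Check whether the page at @paddr was accessed since the last mkold pass and
 * store the size of the mapping that answered the question in @page_sz.
 */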
static bool damon_pa_young(unsigned long paddr, unsigned long *page_sz)
{
	struct folio *folio;
	struct page *page = damon_get_page(PHYS_PFN(paddr));
	struct damon_pa_access_chk_result result = {
		.page_sz = PAGE_SIZE,
		.accessed = false,
	};
	struct rmap_walk_control rwc = {
		.arg = &result,
		.rmap_one = __damon_pa_young,
		.anon_lock = folio_lock_anon_vma_read,
	};
	bool need_lock;

	if (!page)
		return false;
	folio = page_folio(page);

	if (!folio_mapped(folio) || !folio_raw_mapping(folio)) {
		if (folio_test_idle(folio))
			result.accessed = false;
		else
			result.accessed = true;
		folio_put(folio);
		goto out;
	}

	need_lock = !folio_test_anon(folio) || folio_test_ksm(folio);
	if (need_lock && !folio_trylock(folio)) {
		folio_put(folio);
		return false;
	}

	rmap_walk(folio, &rwc);

	if (need_lock)
		folio_unlock(folio);
	folio_put(folio);

out:
	*page_sz = result.page_sz;
	return result.accessed;
}
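/*
 * Update r->nr_accesses for one region.  The result of the last check is kept
 * in static variables, so consecutive regions whose sampling addresses fall
 * in the same (possibly huge) page reuse it instead of re-walking the rmap.
 */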
static void __damon_pa_check_access(struct damon_ctx *ctx,
				    struct damon_region *r)
{
	static unsigned long last_addr;
	static unsigned long last_page_sz = PAGE_SIZE;
	static bool last_accessed;

	/* If the region is in the last checked page, reuse the result */
	if (ALIGN_DOWN(last_addr, last_page_sz) ==
			ALIGN_DOWN(r->sampling_addr, last_page_sz)) {
		if (last_accessed)
			r->nr_accesses++;
		return;
	}

	last_accessed = damon_pa_young(r->sampling_addr, &last_page_sz);
	if (last_accessed)
		r->nr_accesses++;

	last_addr = r->sampling_addr;
}
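/* Check accesses of all regions and return the highest nr_accesses observed */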
static unsigned int damon_pa_check_accesses(struct damon_ctx *ctx)
{
	struct damon_target *t;
	struct damon_region *r;
	unsigned int max_nr_accesses = 0;

	damon_for_each_target(t, ctx) {
		damon_for_each_region(r, t) {
			__damon_pa_check_access(ctx, r);
			max_nr_accesses = max(r->nr_accesses, max_nr_accesses);
		}
	}

	return max_nr_accesses;
}
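/*
 * Apply a DAMOS action to the region.  Only DAMOS_PAGEOUT is supported: each
 * page in the region is aged, isolated from its LRU list, and handed to
 * reclaim_pages().  Returns the number of bytes successfully paged out.
 */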
static unsigned long damon_pa_apply_scheme(struct damon_ctx *ctx,
		struct damon_target *t, struct damon_region *r,
		struct damos *scheme)
{
	unsigned long addr, applied;
	LIST_HEAD(page_list);

	if (scheme->action != DAMOS_PAGEOUT)
		return 0;

	for (addr = r->ar.start; addr < r->ar.end; addr += PAGE_SIZE) {
		struct page *page = damon_get_page(PHYS_PFN(addr));

		if (!page)
			continue;

		ClearPageReferenced(page);
		test_and_clear_page_young(page);
		if (isolate_lru_page(page)) {
			put_page(page);
			continue;
		}
		if (PageUnevictable(page)) {
			putback_lru_page(page);
		} else {
			list_add(&page->lru, &page_list);
			put_page(page);
		}
	}
	applied = reclaim_pages(&page_list);
	cond_resched();
	return applied * PAGE_SIZE;
}
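/*
 * Compute a priority score for applying the scheme to the region.  Pageout
 * targets are scored by damon_pageout_score(); all other actions get the
 * maximum score.
 */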
static int damon_pa_scheme_score(struct damon_ctx *context,
		struct damon_target *t, struct damon_region *r,
		struct damos *scheme)
{
	switch (scheme->action) {
	case DAMOS_PAGEOUT:
		return damon_pageout_score(context, r, scheme);
	default:
		break;
	}

	return DAMOS_MAX_SCORE;
}
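/*
 * Register the physical address space operations set with DAMON.  Callbacks
 * that the physical address space does not need (e.g. target_valid) are left
 * NULL.
 */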
static int __init damon_pa_initcall(void)
{
	struct damon_operations ops = {
		.id = DAMON_OPS_PADDR,
		.prepare_access_checks = damon_pa_prepare_access_checks,
		.check_accesses = damon_pa_check_accesses,
		.reset_aggregated = NULL,
		.target_valid = NULL,
		.apply_scheme = damon_pa_apply_scheme,
		.get_scheme_score = damon_pa_scheme_score,
	};

	return damon_register_ops(&ops);
}

subsys_initcall(damon_pa_initcall);