// SPDX-License-Identifier: GPL-2.0
/*
 * DAMON Primitives for The Physical Address Space
 */

#define pr_fmt(fmt) "damon-pa: " fmt

#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>

#include "../internal.h"
#include "ops-common.h"

static bool __damon_pa_mkold(struct folio *folio, struct vm_area_struct *vma,
		unsigned long addr, void *arg)
{
	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);

	while (page_vma_mapped_walk(&pvmw)) {
		addr = pvmw.address;
		if (pvmw.pte)
			damon_ptep_mkold(pvmw.pte, vma, addr);
		else
			damon_pmdp_mkold(pvmw.pmd, vma, addr);
	}
	return true;
}

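/*
 * Age the folio that backs @paddr: clear the accessed bits of all page table
 * entries mapping it via an rmap walk, or set the folio's idle flag if it is
 * not mapped.  A following damon_pa_young() call can then tell whether the
 * folio was accessed in between.
 */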
static void damon_pa_mkold(unsigned long paddr)
{
	struct folio *folio = damon_get_folio(PHYS_PFN(paddr));
	struct rmap_walk_control rwc = {
		.rmap_one = __damon_pa_mkold,
		.anon_lock = folio_lock_anon_vma_read,
	};
	bool need_lock;

	if (!folio)
		return;

	if (!folio_mapped(folio) || !folio_raw_mapping(folio)) {
		folio_set_idle(folio);
		goto out;
	}

	need_lock = !folio_test_anon(folio) || folio_test_ksm(folio);
	if (need_lock && !folio_trylock(folio))
		goto out;

	rmap_walk(folio, &rwc);

	if (need_lock)
		folio_unlock(folio);

out:
	folio_put(folio);
}

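/*
 * Prepare the access check for @r: pick a random sampling address within the
 * region and age the page backing it with damon_pa_mkold().
 */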
static void __damon_pa_prepare_access_check(struct damon_region *r)
{
	r->sampling_addr = damon_rand(r->ar.start, r->ar.end);

	damon_pa_mkold(r->sampling_addr);
}

static void damon_pa_prepare_access_checks(struct damon_ctx *ctx)
{
	struct damon_target *t;
	struct damon_region *r;

	damon_for_each_target(t, ctx) {
		damon_for_each_region(r, t)
			__damon_pa_prepare_access_check(r);
	}
}

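/*
 * rmap callback for the access check.  Sets the bool pointed to by @arg if
 * any page table entry mapping the folio is young, the folio's idle flag has
 * been cleared, or an MMU notifier reports the mapping as young.
 */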
static bool __damon_pa_young(struct folio *folio, struct vm_area_struct *vma,
		unsigned long addr, void *arg)
{
	bool *accessed = arg;
	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);

	*accessed = false;
	while (page_vma_mapped_walk(&pvmw)) {
		addr = pvmw.address;
		if (pvmw.pte) {
			*accessed = pte_young(ptep_get(pvmw.pte)) ||
				!folio_test_idle(folio) ||
				mmu_notifier_test_young(vma->vm_mm, addr);
		} else {
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
			*accessed = pmd_young(pmdp_get(pvmw.pmd)) ||
				!folio_test_idle(folio) ||
				mmu_notifier_test_young(vma->vm_mm, addr);
#else
			WARN_ON_ONCE(1);
#endif	/* CONFIG_TRANSPARENT_HUGEPAGE */
		}
		if (*accessed) {
			page_vma_mapped_walk_done(&pvmw);
			break;
		}
	}

	/* If accessed, stop walking */
	return *accessed == false;
}

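/*
 * Return whether the folio backing @paddr was accessed since the last
 * damon_pa_mkold() on it.  The folio size is reported via @folio_sz, so that
 * the caller can reuse the result for other sampling addresses that fall
 * into the same folio.
 */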
static bool damon_pa_young(unsigned long paddr, unsigned long *folio_sz)
{
	struct folio *folio = damon_get_folio(PHYS_PFN(paddr));
	bool accessed = false;
	struct rmap_walk_control rwc = {
		.arg = &accessed,
		.rmap_one = __damon_pa_young,
		.anon_lock = folio_lock_anon_vma_read,
	};
	bool need_lock;

	if (!folio)
		return false;

	if (!folio_mapped(folio) || !folio_raw_mapping(folio)) {
		if (folio_test_idle(folio))
			accessed = false;
		else
			accessed = true;
		goto out;
	}

	need_lock = !folio_test_anon(folio) || folio_test_ksm(folio);
	if (need_lock && !folio_trylock(folio))
		goto out;

	rmap_walk(folio, &rwc);

	if (need_lock)
		folio_unlock(folio);

out:
	*folio_sz = folio_size(folio);
	folio_put(folio);
	return accessed;
}

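/*
 * Check the access to the page that @r's sampling address points to, and
 * update the region's access rate accordingly.  The result of the last check
 * is cached in the static variables below, since sampling addresses of
 * consecutive regions can fall into the same large folio.
 */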
static void __damon_pa_check_access(struct damon_region *r,
		struct damon_attrs *attrs)
{
	static unsigned long last_addr;
	static unsigned long last_folio_sz = PAGE_SIZE;
	static bool last_accessed;

	/* If the region is in the last checked page, reuse the result */
	if (ALIGN_DOWN(last_addr, last_folio_sz) ==
			ALIGN_DOWN(r->sampling_addr, last_folio_sz)) {
		damon_update_region_access_rate(r, last_accessed, attrs);
		return;
	}

	last_accessed = damon_pa_young(r->sampling_addr, &last_folio_sz);
	damon_update_region_access_rate(r, last_accessed, attrs);

	last_addr = r->sampling_addr;
}

static unsigned int damon_pa_check_accesses(struct damon_ctx *ctx)
{
	struct damon_target *t;
	struct damon_region *r;
	unsigned int max_nr_accesses = 0;

	damon_for_each_target(t, ctx) {
		damon_for_each_region(r, t) {
			__damon_pa_check_access(r, &ctx->attrs);
			max_nr_accesses = max(r->nr_accesses, max_nr_accesses);
		}
	}

	return max_nr_accesses;
}

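/*
 * Return whether @folio should be filtered out by @filter.  A folio matches
 * an 'anon' type filter if it is anonymous, and a 'memcg' type filter if it
 * is charged to the filter's memory cgroup.  The folio is filtered out if
 * its matching status equals filter->matching.
 */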
static bool __damos_pa_filter_out(struct damos_filter *filter,
		struct folio *folio)
{
	bool matched = false;
	struct mem_cgroup *memcg;

	switch (filter->type) {
	case DAMOS_FILTER_TYPE_ANON:
		matched = folio_test_anon(folio);
		break;
	case DAMOS_FILTER_TYPE_MEMCG:
		rcu_read_lock();
		memcg = folio_memcg_check(folio);
		if (!memcg)
			matched = false;
		else
			matched = filter->memcg_id == mem_cgroup_id(memcg);
		rcu_read_unlock();
		break;
	default:
		break;
	}

	return matched == filter->matching;
}

/*
 * damos_pa_filter_out - Return true if the page should be filtered out.
 */
static bool damos_pa_filter_out(struct damos *scheme, struct folio *folio)
{
	struct damos_filter *filter;

	damos_for_each_filter(filter, scheme) {
		if (__damos_pa_filter_out(filter, folio))
			return true;
	}
	return false;
}

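/*
 * Usage sketch (not part of this file's logic): a DAMOS user could make a
 * scheme skip anonymous pages by attaching an 'anon' type filter with
 * 'matching' set, roughly as below.  'scheme' is assumed here to be a
 * &struct damos constructed elsewhere, e.g. via damon_new_scheme():
 *
 *	struct damos_filter *filter =
 *			damos_new_filter(DAMOS_FILTER_TYPE_ANON, true);
 *
 *	if (filter)
 *		damos_add_filter(scheme, filter);
 */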
static unsigned long damon_pa_pageout(struct damon_region *r, struct damos *s)
{
	unsigned long addr, applied;
	LIST_HEAD(folio_list);

	for (addr = r->ar.start; addr < r->ar.end; addr += PAGE_SIZE) {
		struct folio *folio = damon_get_folio(PHYS_PFN(addr));

		if (!folio)
			continue;

		if (damos_pa_filter_out(s, folio))
			goto put_folio;

		folio_clear_referenced(folio);
		folio_test_clear_young(folio);
		if (!folio_isolate_lru(folio))
			goto put_folio;
		if (folio_test_unevictable(folio))
			folio_putback_lru(folio);
		else
			list_add(&folio->lru, &folio_list);
put_folio:
		folio_put(folio);
	}
	applied = reclaim_pages(&folio_list);
	cond_resched();
	return applied * PAGE_SIZE;
}

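/*
 * Common implementation of the DAMOS_LRU_PRIO and DAMOS_LRU_DEPRIO actions:
 * mark each non-filtered folio in @r as accessed (if @mark_accessed is true)
 * or deactivate it, and return the total bytes applied.
 */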
static inline unsigned long damon_pa_mark_accessed_or_deactivate(
		struct damon_region *r, struct damos *s, bool mark_accessed)
{
	unsigned long addr, applied = 0;

	for (addr = r->ar.start; addr < r->ar.end; addr += PAGE_SIZE) {
		struct folio *folio = damon_get_folio(PHYS_PFN(addr));

		if (!folio)
			continue;

		if (damos_pa_filter_out(s, folio))
			goto put_folio;

		if (mark_accessed)
			folio_mark_accessed(folio);
		else
			folio_deactivate(folio);
		applied += folio_nr_pages(folio);
put_folio:
		folio_put(folio);
	}
	return applied * PAGE_SIZE;
}

static unsigned long damon_pa_mark_accessed(struct damon_region *r,
	struct damos *s)
{
	return damon_pa_mark_accessed_or_deactivate(r, s, true);
}

static unsigned long damon_pa_deactivate_pages(struct damon_region *r,
	struct damos *s)
{
	return damon_pa_mark_accessed_or_deactivate(r, s, false);
}

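/*
 * Apply @scheme's action to @r, and return the total bytes of the region
 * that the action was successfully applied to.  @ctx and @t are unused here
 * but are part of the common damon_operations callback signature.
 */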
static unsigned long damon_pa_apply_scheme(struct damon_ctx *ctx,
		struct damon_target *t, struct damon_region *r,
		struct damos *scheme)
{
	switch (scheme->action) {
	case DAMOS_PAGEOUT:
		return damon_pa_pageout(r, scheme);
	case DAMOS_LRU_PRIO:
		return damon_pa_mark_accessed(r, scheme);
	case DAMOS_LRU_DEPRIO:
		return damon_pa_deactivate_pages(r, scheme);
	case DAMOS_STAT:
		break;
	default:
		/* DAMOS actions that are not yet supported by 'paddr'. */
		break;
	}
	return 0;
}

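/*
 * Return the priority score of @r for @scheme, in [0, DAMOS_MAX_SCORE].
 * Colder regions score higher for DAMOS_PAGEOUT and DAMOS_LRU_DEPRIO, and
 * hotter regions score higher for DAMOS_LRU_PRIO.
 */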
static int damon_pa_scheme_score(struct damon_ctx *context,
		struct damon_target *t, struct damon_region *r,
		struct damos *scheme)
{
	switch (scheme->action) {
	case DAMOS_PAGEOUT:
		return damon_cold_score(context, r, scheme);
	case DAMOS_LRU_PRIO:
		return damon_hot_score(context, r, scheme);
	case DAMOS_LRU_DEPRIO:
		return damon_cold_score(context, r, scheme);
	default:
		break;
	}

	return DAMOS_MAX_SCORE;
}

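/*
 * Register the 'paddr' operations set.  Monitoring callbacks that need no
 * work for the physical address space, such as target initialization,
 * update, and validation, are left NULL.
 */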
static int __init damon_pa_initcall(void)
{
	struct damon_operations ops = {
		.id = DAMON_OPS_PADDR,
		.init = NULL,
		.update = NULL,
		.prepare_access_checks = damon_pa_prepare_access_checks,
		.check_accesses = damon_pa_check_accesses,
		.reset_aggregated = NULL,
		.target_valid = NULL,
		.cleanup = NULL,
		.apply_scheme = damon_pa_apply_scheme,
		.get_scheme_score = damon_pa_scheme_score,
	};

	return damon_register_ops(&ops);
}

subsys_initcall(damon_pa_initcall);