// SPDX-License-Identifier: GPL-2.0
/*
 * DAMON Primitives for The Physical Address Space
 */

#define pr_fmt(fmt) "damon-pa: " fmt

#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>

#include "../internal.h"
#include "prmtv-common.h"
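
/*
 * rmap_walk() callback for damon_pa_mkold().  For each PTE or PMD that
 * maps @page in @vma, clear the accessed bit via the shared
 * damon_ptep_mkold()/damon_pmdp_mkold() helpers.  Returning true keeps
 * the walk going over all mappings of the page.
 */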
static bool __damon_pa_mkold(struct page *page, struct vm_area_struct *vma,
		unsigned long addr, void *arg)
{
	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
		.address = addr,
	};

	while (page_vma_mapped_walk(&pvmw)) {
		addr = pvmw.address;
		if (pvmw.pte)
			damon_ptep_mkold(pvmw.pte, vma->vm_mm, addr);
		else
			damon_pmdp_mkold(pvmw.pmd, vma->vm_mm, addr);
	}
	return true;
}
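
/*
 * Age the page backing the physical address @paddr: clear the accessed
 * bits in every page table that maps it, so a later damon_pa_young()
 * call can detect whether it was touched in between.  Pages that are
 * not mapped anywhere are just marked idle.
 */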
static void damon_pa_mkold(unsigned long paddr)
{
	struct page *page = damon_get_page(PHYS_PFN(paddr));
	struct rmap_walk_control rwc = {
		.rmap_one = __damon_pa_mkold,
		.anon_lock = page_lock_anon_vma_read,
	};
	bool need_lock;

	if (!page)
		return;

	if (!page_mapped(page) || !page_rmapping(page)) {
		set_page_idle(page);
		goto out;
	}

	need_lock = !PageAnon(page) || PageKsm(page);
	if (need_lock && !trylock_page(page))
		goto out;

	rmap_walk(page, &rwc);

	if (need_lock)
		unlock_page(page);

out:
	put_page(page);
}
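
/*
 * Pick one random sampling address inside the region and age the page
 * behind it.  Checking a single sample per region is what keeps DAMON's
 * monitoring overhead bounded regardless of the size of the target
 * address space.
 */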
static void __damon_pa_prepare_access_check(struct damon_ctx *ctx,
		struct damon_region *r)
{
	r->sampling_addr = damon_rand(r->ar.start, r->ar.end);

	damon_pa_mkold(r->sampling_addr);
}

static void damon_pa_prepare_access_checks(struct damon_ctx *ctx)
{
	struct damon_target *t;
	struct damon_region *r;

	damon_for_each_target(t, ctx) {
		damon_for_each_region(r, t)
			__damon_pa_prepare_access_check(ctx, r);
	}
}
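
/*
 * Result of one access check: whether the sampled page was accessed,
 * and the size of the checked mapping (PAGE_SIZE, or the huge page size
 * when the access was found through a PMD mapping).
 */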
struct damon_pa_access_chk_result {
	unsigned long page_sz;
	bool accessed;
};
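
/*
 * rmap_walk() callback for damon_pa_young().  A mapping counts as
 * accessed if its PTE/PMD young bit is set, the page has left the idle
 * state, or an mmu notifier user (e.g. a KVM guest) reports it young.
 * The walk stops as soon as one accessed mapping is found.
 */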
static bool __damon_pa_young(struct page *page, struct vm_area_struct *vma,
		unsigned long addr, void *arg)
{
	struct damon_pa_access_chk_result *result = arg;
	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
		.address = addr,
	};

	result->accessed = false;
	result->page_sz = PAGE_SIZE;
	while (page_vma_mapped_walk(&pvmw)) {
		addr = pvmw.address;
		if (pvmw.pte) {
			result->accessed = pte_young(*pvmw.pte) ||
				!page_is_idle(page) ||
				mmu_notifier_test_young(vma->vm_mm, addr);
		} else {
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
			result->accessed = pmd_young(*pvmw.pmd) ||
				!page_is_idle(page) ||
				mmu_notifier_test_young(vma->vm_mm, addr);
			result->page_sz = ((1UL) << HPAGE_PMD_SHIFT);
#else
			WARN_ON_ONCE(1);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
		}
		if (result->accessed) {
			page_vma_mapped_walk_done(&pvmw);
			break;
		}
	}

	/* If accessed, stop walking */
	return !result->accessed;
}
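
/*
 * Report whether the page backing @paddr was accessed since the last
 * damon_pa_mkold() on it, storing the size of the checked mapping in
 * @page_sz.  Unmapped pages are judged by the page idle flag alone,
 * since they have no page table entries to walk.
 */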
static bool damon_pa_young(unsigned long paddr, unsigned long *page_sz)
{
	struct page *page = damon_get_page(PHYS_PFN(paddr));
	struct damon_pa_access_chk_result result = {
		.page_sz = PAGE_SIZE,
		.accessed = false,
	};
	struct rmap_walk_control rwc = {
		.arg = &result,
		.rmap_one = __damon_pa_young,
		.anon_lock = page_lock_anon_vma_read,
	};
	bool need_lock;

	if (!page)
		return false;

	if (!page_mapped(page) || !page_rmapping(page)) {
		if (page_is_idle(page))
			result.accessed = false;
		else
			result.accessed = true;
		put_page(page);
		goto out;
	}

	need_lock = !PageAnon(page) || PageKsm(page);
	if (need_lock && !trylock_page(page)) {
		put_page(page);
		return false;
	}

	rmap_walk(page, &rwc);

	if (need_lock)
		unlock_page(page);
	put_page(page);

out:
	*page_sz = result.page_sz;
	return result.accessed;
}
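
/*
 * Check the sampling address of one region and bump r->nr_accesses if
 * it was touched.  Because neighboring regions can sample the same
 * (huge) page, the last result is cached in static variables and reused
 * whenever the next sampling address falls into the same page.
 */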
static void __damon_pa_check_access(struct damon_ctx *ctx,
		struct damon_region *r)
{
	static unsigned long last_addr;
	static unsigned long last_page_sz = PAGE_SIZE;
	static bool last_accessed;

	/* If the region is in the last checked page, reuse the result */
	if (ALIGN_DOWN(last_addr, last_page_sz) ==
			ALIGN_DOWN(r->sampling_addr, last_page_sz)) {
		if (last_accessed)
			r->nr_accesses++;
		return;
	}

	last_accessed = damon_pa_young(r->sampling_addr, &last_page_sz);
	if (last_accessed)
		r->nr_accesses++;

	last_addr = r->sampling_addr;
}

static unsigned int damon_pa_check_accesses(struct damon_ctx *ctx)
{
	struct damon_target *t;
	struct damon_region *r;
	unsigned int max_nr_accesses = 0;

	damon_for_each_target(t, ctx) {
		damon_for_each_region(r, t) {
			__damon_pa_check_access(ctx, r);
			max_nr_accesses = max(r->nr_accesses, max_nr_accesses);
		}
	}

	return max_nr_accesses;
}
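
/*
 * The physical address space is always present, so a paddr target needs
 * no validity check; this primitive unconditionally succeeds.
 */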
bool damon_pa_target_valid(void *t)
{
	return true;
}
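
/*
 * Apply a DAMOS scheme to a region.  Only DAMOS_PAGEOUT is implemented
 * for the physical address space: each page of the region is marked
 * cold, isolated from its LRU list, and passed to reclaim_pages().
 * Returns the number of bytes the action was applied to.
 */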
static unsigned long damon_pa_apply_scheme(struct damon_ctx *ctx,
		struct damon_target *t, struct damon_region *r,
		struct damos *scheme)
{
	unsigned long addr, applied;
	LIST_HEAD(page_list);

	if (scheme->action != DAMOS_PAGEOUT)
		return 0;

	for (addr = r->ar.start; addr < r->ar.end; addr += PAGE_SIZE) {
		struct page *page = damon_get_page(PHYS_PFN(addr));

		if (!page)
			continue;

		ClearPageReferenced(page);
		test_and_clear_page_young(page);
		if (isolate_lru_page(page)) {
			put_page(page);
			continue;
		}
		if (PageUnevictable(page)) {
			putback_lru_page(page);
		} else {
			list_add(&page->lru, &page_list);
			put_page(page);
		}
	}
	applied = reclaim_pages(&page_list);
	cond_resched();
	return applied * PAGE_SIZE;
}
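
/*
 * Convert a scheme's action into the priority score DAMOS uses when its
 * quota forces it to choose among candidate regions.  DAMOS_PAGEOUT
 * defers to the shared damon_pageout_score(); every other action gets
 * the maximum score.
 */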
static int damon_pa_scheme_score(struct damon_ctx *context,
		struct damon_target *t, struct damon_region *r,
		struct damos *scheme)
{
	switch (scheme->action) {
	case DAMOS_PAGEOUT:
		return damon_pageout_score(context, r, scheme);
	default:
		break;
	}

	return DAMOS_MAX_SCORE;
}
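
/*
 * Install the physical address space primitives into @ctx.  init,
 * update, reset_aggregated, and cleanup stay NULL: target regions for
 * the physical address space are given by the user rather than
 * constructed automatically, and no per-target state needs tearing
 * down.
 */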
void damon_pa_set_primitives(struct damon_ctx *ctx)
{
	ctx->primitive.init = NULL;
	ctx->primitive.update = NULL;
	ctx->primitive.prepare_access_checks = damon_pa_prepare_access_checks;
	ctx->primitive.check_accesses = damon_pa_check_accesses;
	ctx->primitive.reset_aggregated = NULL;
	ctx->primitive.target_valid = damon_pa_target_valid;
	ctx->primitive.cleanup = NULL;
	ctx->primitive.apply_scheme = damon_pa_apply_scheme;
	ctx->primitive.get_scheme_score = damon_pa_scheme_score;
}
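
/*
 * A minimal usage sketch, assuming this kernel's damon_new_ctx() and
 * damon_start() interfaces; the target and monitoring-region setup is
 * elided and the error handling is illustrative only:
 *
 *	struct damon_ctx *ctx = damon_new_ctx();
 *
 *	if (!ctx)
 *		return -ENOMEM;
 *	damon_pa_set_primitives(ctx);
 *	... set up targets and monitoring regions here, then:
 *	return damon_start(&ctx, 1);
 */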