mm/damon/paddr.c
// SPDX-License-Identifier: GPL-2.0
/*
 * DAMON Primitives for The Physical Address Space
 *
 * Author: SeongJae Park <[email protected]>
 */

#define pr_fmt(fmt) "damon-pa: " fmt

#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>

#include "../internal.h"
#include "prmtv-common.h"

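/*
 * rmap_one() callback for damon_pa_mkold(): clear the Accessed bit of every
 * PTE or PMD that maps the page in the given VMA, so a later check can tell
 * whether the page was referenced in between.
 */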
static bool __damon_pa_mkold(struct page *page, struct vm_area_struct *vma,
		unsigned long addr, void *arg)
{
	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
		.address = addr,
	};

	while (page_vma_mapped_walk(&pvmw)) {
		addr = pvmw.address;
		if (pvmw.pte)
			damon_ptep_mkold(pvmw.pte, vma->vm_mm, addr);
		else
			damon_pmdp_mkold(pvmw.pmd, vma->vm_mm, addr);
	}
	return true;
}

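/*
 * Clear the accessed state of the page at @paddr via a reverse-mapping walk.
 * Unmapped pages are simply marked idle.  Non-anonymous and KSM pages need
 * the page lock for rmap_walk(); use trylock_page() and give up on
 * contention rather than block the monitoring thread.
 */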
static void damon_pa_mkold(unsigned long paddr)
{
	struct page *page = damon_get_page(PHYS_PFN(paddr));
	struct rmap_walk_control rwc = {
		.rmap_one = __damon_pa_mkold,
		.anon_lock = page_lock_anon_vma_read,
	};
	bool need_lock;

	if (!page)
		return;

	if (!page_mapped(page) || !page_rmapping(page)) {
		set_page_idle(page);
		goto out;
	}

	need_lock = !PageAnon(page) || PageKsm(page);
	if (need_lock && !trylock_page(page))
		goto out;

	rmap_walk(page, &rwc);

	if (need_lock)
		unlock_page(page);

out:
	put_page(page);
}

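/*
 * Prepare the access check of a single region: pick a random physical
 * address in the region to sample and clear its accessed state.
 */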
static void __damon_pa_prepare_access_check(struct damon_ctx *ctx,
					    struct damon_region *r)
{
	r->sampling_addr = damon_rand(r->ar.start, r->ar.end);

	damon_pa_mkold(r->sampling_addr);
}

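/*
 * prepare_access_checks() primitive: called by the DAMON core before each
 * sampling interval, for every region of every target.
 */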
static void damon_pa_prepare_access_checks(struct damon_ctx *ctx)
{
	struct damon_target *t;
	struct damon_region *r;

	damon_for_each_target(t, ctx) {
		damon_for_each_region(r, t)
			__damon_pa_prepare_access_check(ctx, r);
	}
}

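/*
 * Result of a single access check, shared between damon_pa_young() and its
 * rmap walk callback via the rmap_walk_control argument.
 */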
struct damon_pa_access_chk_result {
	unsigned long page_sz;
	bool accessed;
};

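/*
 * rmap_one() callback for damon_pa_young(): check whether any PTE or PMD
 * mapping of the page has been referenced since damon_pa_mkold(), consulting
 * the young bit, the page idle flag, and MMU notifiers (for secondary MMUs).
 * Also report the mapping size, since a PMD mapping covers more than
 * PAGE_SIZE.
 */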
static bool __damon_pa_young(struct page *page, struct vm_area_struct *vma,
		unsigned long addr, void *arg)
{
	struct damon_pa_access_chk_result *result = arg;
	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
		.address = addr,
	};

	result->accessed = false;
	result->page_sz = PAGE_SIZE;
	while (page_vma_mapped_walk(&pvmw)) {
		addr = pvmw.address;
		if (pvmw.pte) {
			result->accessed = pte_young(*pvmw.pte) ||
				!page_is_idle(page) ||
				mmu_notifier_test_young(vma->vm_mm, addr);
		} else {
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
			result->accessed = pmd_young(*pvmw.pmd) ||
				!page_is_idle(page) ||
				mmu_notifier_test_young(vma->vm_mm, addr);
			result->page_sz = ((1UL) << HPAGE_PMD_SHIFT);
#else
			WARN_ON_ONCE(1);
#endif	/* CONFIG_TRANSPARENT_HUGEPAGE */
		}
		if (result->accessed) {
			page_vma_mapped_walk_done(&pvmw);
			break;
		}
	}

	/* If accessed, stop walking */
	return !result->accessed;
}

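/*
 * Check whether the page at @paddr was accessed since the last call to
 * damon_pa_mkold() on it, and report the size of the page via @page_sz.
 * Locking mirrors damon_pa_mkold().
 */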
static bool damon_pa_young(unsigned long paddr, unsigned long *page_sz)
{
	struct page *page = damon_get_page(PHYS_PFN(paddr));
	struct damon_pa_access_chk_result result = {
		.page_sz = PAGE_SIZE,
		.accessed = false,
	};
	struct rmap_walk_control rwc = {
		.arg = &result,
		.rmap_one = __damon_pa_young,
		.anon_lock = page_lock_anon_vma_read,
	};
	bool need_lock;

	if (!page)
		return false;

	if (!page_mapped(page) || !page_rmapping(page)) {
		result.accessed = !page_is_idle(page);
		put_page(page);
		goto out;
	}

	need_lock = !PageAnon(page) || PageKsm(page);
	if (need_lock && !trylock_page(page)) {
		put_page(page);
		return false;
	}

	rmap_walk(page, &rwc);

	if (need_lock)
		unlock_page(page);
	put_page(page);

out:
	*page_sz = result.page_sz;
	return result.accessed;
}

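/*
 * Check the access status of a single region.  Because adjacent regions can
 * sample addresses in the same (possibly huge) page, cache the last checked
 * address, its page size, and the result, and reuse them when the current
 * sampling address falls in the same page.
 */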
static void __damon_pa_check_access(struct damon_ctx *ctx,
				    struct damon_region *r)
{
	static unsigned long last_addr;
	static unsigned long last_page_sz = PAGE_SIZE;
	static bool last_accessed;

	/* If the region is in the last checked page, reuse the result */
	if (ALIGN_DOWN(last_addr, last_page_sz) ==
				ALIGN_DOWN(r->sampling_addr, last_page_sz)) {
		if (last_accessed)
			r->nr_accesses++;
		return;
	}

	last_accessed = damon_pa_young(r->sampling_addr, &last_page_sz);
	if (last_accessed)
		r->nr_accesses++;

	last_addr = r->sampling_addr;
}

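/*
 * check_accesses() primitive: update nr_accesses of every region and return
 * the maximum, which the core uses when merging similar adjacent regions.
 */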
static unsigned int damon_pa_check_accesses(struct damon_ctx *ctx)
{
	struct damon_target *t;
	struct damon_region *r;
	unsigned int max_nr_accesses = 0;

	damon_for_each_target(t, ctx) {
		damon_for_each_region(r, t) {
			__damon_pa_check_access(ctx, r);
			max_nr_accesses = max(r->nr_accesses, max_nr_accesses);
		}
	}

	return max_nr_accesses;
}

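/*
 * target_valid() primitive: unlike the virtual address space primitives,
 * which become invalid when the target process exits, the physical address
 * space is always there to monitor.
 */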
bool damon_pa_target_valid(void *t)
{
	return true;
}

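/*
 * apply_scheme() primitive.  Only DAMOS_PAGEOUT is supported: clear the
 * referenced hints of each page in the region, isolate the evictable pages
 * from their LRU lists, and hand them to reclaim_pages().  Returns the
 * number of bytes the scheme was applied to.
 */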
static unsigned long damon_pa_apply_scheme(struct damon_ctx *ctx,
		struct damon_target *t, struct damon_region *r,
		struct damos *scheme)
{
	unsigned long addr, applied;
	LIST_HEAD(page_list);

	if (scheme->action != DAMOS_PAGEOUT)
		return 0;

	for (addr = r->ar.start; addr < r->ar.end; addr += PAGE_SIZE) {
		struct page *page = damon_get_page(PHYS_PFN(addr));

		if (!page)
			continue;

		ClearPageReferenced(page);
		test_and_clear_page_young(page);
		if (isolate_lru_page(page)) {
			put_page(page);
			continue;
		}
		if (PageUnevictable(page)) {
			putback_lru_page(page);
		} else {
			list_add(&page->lru, &page_list);
			put_page(page);
		}
	}
	applied = reclaim_pages(&page_list);
	cond_resched();
	return applied * PAGE_SIZE;
}

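/*
 * get_scheme_score() primitive: used by the core to prioritize regions under
 * a scheme's quota.  Only DAMOS_PAGEOUT has a dedicated score for now.
 */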
static int damon_pa_scheme_score(struct damon_ctx *context,
		struct damon_target *t, struct damon_region *r,
		struct damos *scheme)
{
	switch (scheme->action) {
	case DAMOS_PAGEOUT:
		return damon_pageout_score(context, r, scheme);
	default:
		break;
	}

	return DAMOS_MAX_SCORE;
}

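/*
 * Set the monitoring primitives of @ctx for the physical address space.
 * Primitives that need nothing for this address space are left NULL.
 */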
void damon_pa_set_primitives(struct damon_ctx *ctx)
{
	ctx->primitive.init = NULL;
	ctx->primitive.update = NULL;
	ctx->primitive.prepare_access_checks = damon_pa_prepare_access_checks;
	ctx->primitive.check_accesses = damon_pa_check_accesses;
	ctx->primitive.reset_aggregated = NULL;
	ctx->primitive.target_valid = damon_pa_target_valid;
	ctx->primitive.cleanup = NULL;
	ctx->primitive.apply_scheme = damon_pa_apply_scheme;
	ctx->primitive.get_scheme_score = damon_pa_scheme_score;
}
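
/*
 * Illustrative usage sketch (not part of the original file): how a caller
 * could wire these primitives into a monitoring context.  This assumes the
 * damon_new_ctx(), damon_destroy_ctx() and damon_start() declarations from
 * include/linux/damon.h as of this kernel version:
 *
 *	struct damon_ctx *ctx = damon_new_ctx();
 *
 *	if (!ctx)
 *		return -ENOMEM;
 *	damon_pa_set_primitives(ctx);
 *	// set monitoring attributes, targets, and schemes here, then:
 *	if (damon_start(&ctx, 1))
 *		damon_destroy_ctx(ctx);
 */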