/*
 * Copyright 2013 Red Hat Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Authors: Jérôme Glisse <[email protected]>
 */
/*
 * Refer to include/linux/hmm.h for information about heterogeneous memory
 * management or HMM for short.
 */
#include <linux/mm.h>
#include <linux/hmm.h>
#include <linux/init.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mmzone.h>
#include <linux/pagemap.h>
#include <linux/swapops.h>
#include <linux/hugetlb.h>
#include <linux/memremap.h>
#include <linux/jump_label.h>
#include <linux/mmu_notifier.h>
#include <linux/memory_hotplug.h>

#define PA_SECTION_SIZE (1UL << PA_SECTION_SHIFT)

#if IS_ENABLED(CONFIG_HMM_MIRROR)
static const struct mmu_notifier_ops hmm_mmu_notifier_ops;

/*
 * struct hmm - HMM per mm struct
 *
 * @mm: mm struct this HMM struct is bound to
 * @lock: lock protecting ranges list
 * @ranges: list of ranges being snapshotted
 * @mirrors: list of mirrors for this mm
 * @mmu_notifier: mmu notifier to track updates to CPU page table
 * @mirrors_sem: read/write semaphore protecting the mirrors list
 */
struct hmm {
	struct mm_struct	*mm;
	spinlock_t		lock;
	struct list_head	ranges;
	struct list_head	mirrors;
	struct mmu_notifier	mmu_notifier;
	struct rw_semaphore	mirrors_sem;
};

/*
 * hmm_register - register HMM against an mm (HMM internal)
 *
 * @mm: mm struct to attach to
 *
 * This is not intended to be used directly by device drivers. It allocates an
 * HMM struct if mm does not have one, and initializes it.
 */
static struct hmm *hmm_register(struct mm_struct *mm)
{
	struct hmm *hmm = READ_ONCE(mm->hmm);
	bool cleanup = false;

	/*
	 * The hmm struct can only be freed once the mm_struct goes away,
	 * hence we should always have pre-allocated a new hmm struct
	 * above.
	 */
	if (hmm)
		return hmm;

	hmm = kmalloc(sizeof(*hmm), GFP_KERNEL);
	if (!hmm)
		return NULL;
	INIT_LIST_HEAD(&hmm->mirrors);
	init_rwsem(&hmm->mirrors_sem);
	hmm->mmu_notifier.ops = NULL;
	INIT_LIST_HEAD(&hmm->ranges);
	spin_lock_init(&hmm->lock);
	hmm->mm = mm;

	spin_lock(&mm->page_table_lock);
	if (!mm->hmm)
		mm->hmm = hmm;
	else
		cleanup = true;
	spin_unlock(&mm->page_table_lock);

	if (cleanup)
		goto error;

	/*
	 * We should only get here if we hold the mmap_sem in write mode,
	 * i.e. on registration of the first mirror through
	 * hmm_mirror_register().
	 */
	hmm->mmu_notifier.ops = &hmm_mmu_notifier_ops;
	if (__mmu_notifier_register(&hmm->mmu_notifier, mm))
		goto error_mm;

	return mm->hmm;

error_mm:
	spin_lock(&mm->page_table_lock);
	if (mm->hmm == hmm)
		mm->hmm = NULL;
	spin_unlock(&mm->page_table_lock);
error:
	kfree(hmm);
	return NULL;
}

void hmm_mm_destroy(struct mm_struct *mm)
{
	kfree(mm->hmm);
}

static int hmm_invalidate_range(struct hmm *hmm, bool device,
				const struct hmm_update *update)
{
	struct hmm_mirror *mirror;
	struct hmm_range *range;

	spin_lock(&hmm->lock);
	list_for_each_entry(range, &hmm->ranges, list) {
		unsigned long addr, idx, npages;

		if (update->end < range->start || update->start >= range->end)
			continue;

		range->valid = false;
		addr = max(update->start, range->start);
		idx = (addr - range->start) >> PAGE_SHIFT;
		npages = (min(range->end, update->end) - addr) >> PAGE_SHIFT;
		memset(&range->pfns[idx], 0, sizeof(*range->pfns) * npages);
	}
	spin_unlock(&hmm->lock);

	if (!device)
		return 0;

	down_read(&hmm->mirrors_sem);
	list_for_each_entry(mirror, &hmm->mirrors, list) {
		int ret;

		ret = mirror->ops->sync_cpu_device_pagetables(mirror, update);
		if (!update->blockable && ret == -EAGAIN) {
			up_read(&hmm->mirrors_sem);
			return -EAGAIN;
		}
	}
	up_read(&hmm->mirrors_sem);

	return 0;
}

static void hmm_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
	struct hmm_mirror *mirror;
	struct hmm *hmm = mm->hmm;

	down_write(&hmm->mirrors_sem);
	mirror = list_first_entry_or_null(&hmm->mirrors, struct hmm_mirror,
					  list);
	while (mirror) {
		list_del_init(&mirror->list);
		if (mirror->ops->release) {
			/*
			 * Drop mirrors_sem so callback can wait on any pending
			 * work that might itself trigger mmu_notifier callback
			 * and thus would deadlock with us.
			 */
			up_write(&hmm->mirrors_sem);
			mirror->ops->release(mirror);
			down_write(&hmm->mirrors_sem);
		}
		mirror = list_first_entry_or_null(&hmm->mirrors,
						  struct hmm_mirror, list);
	}
	up_write(&hmm->mirrors_sem);
}

static int hmm_invalidate_range_start(struct mmu_notifier *mn,
			const struct mmu_notifier_range *range)
{
	struct hmm_update update;
	struct hmm *hmm = range->mm->hmm;

	VM_BUG_ON(!hmm);

	update.start = range->start;
	update.end = range->end;
	update.event = HMM_UPDATE_INVALIDATE;
	update.blockable = range->blockable;
	return hmm_invalidate_range(hmm, true, &update);
}

static void hmm_invalidate_range_end(struct mmu_notifier *mn,
			const struct mmu_notifier_range *range)
{
	struct hmm_update update;
	struct hmm *hmm = range->mm->hmm;

	VM_BUG_ON(!hmm);

	update.start = range->start;
	update.end = range->end;
	update.event = HMM_UPDATE_INVALIDATE;
	update.blockable = true;
	hmm_invalidate_range(hmm, false, &update);
}

static const struct mmu_notifier_ops hmm_mmu_notifier_ops = {
	.release		= hmm_release,
	.invalidate_range_start	= hmm_invalidate_range_start,
	.invalidate_range_end	= hmm_invalidate_range_end,
};

/*
 * hmm_mirror_register() - register a mirror against an mm
 *
 * @mirror: new mirror struct to register
 * @mm: mm to register against
 *
 * To start mirroring a process address space, the device driver must register
 * an HMM mirror struct.
 *
 * THE mm->mmap_sem MUST BE HELD IN WRITE MODE !
 */
int hmm_mirror_register(struct hmm_mirror *mirror, struct mm_struct *mm)
{
	/* Sanity check */
	if (!mm || !mirror || !mirror->ops)
		return -EINVAL;

again:
	mirror->hmm = hmm_register(mm);
	if (!mirror->hmm)
		return -ENOMEM;

	down_write(&mirror->hmm->mirrors_sem);
	if (mirror->hmm->mm == NULL) {
		/*
		 * A racing hmm_mirror_unregister() is about to destroy the hmm
		 * struct. Try again to allocate a new one.
		 */
		up_write(&mirror->hmm->mirrors_sem);
		mirror->hmm = NULL;
		goto again;
	} else {
		list_add(&mirror->list, &mirror->hmm->mirrors);
		up_write(&mirror->hmm->mirrors_sem);
	}

	return 0;
}
EXPORT_SYMBOL(hmm_mirror_register);
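
/*
 * Illustrative sketch (not part of this file): a driver typically embeds a
 * struct hmm_mirror in its own per-process structure, provides the
 * sync_cpu_device_pagetables() and release() callbacks, and registers the
 * mirror while holding mmap_sem in write mode. All my_* names below are
 * hypothetical.
 *
 *   static int my_sync_cpu_device_pagetables(struct hmm_mirror *mirror,
 *                                            const struct hmm_update *update)
 *   {
 *       // Invalidate the device page table for [update->start, update->end);
 *       // if update->blockable is false, return -EAGAIN instead of sleeping.
 *       return 0;
 *   }
 *
 *   static const struct hmm_mirror_ops my_mirror_ops = {
 *       .sync_cpu_device_pagetables = my_sync_cpu_device_pagetables,
 *       .release = my_release,    // tear down the driver side (not shown)
 *   };
 *
 *   down_write(&mm->mmap_sem);
 *   my_proc->mirror.ops = &my_mirror_ops;
 *   ret = hmm_mirror_register(&my_proc->mirror, mm);
 *   up_write(&mm->mmap_sem);
 */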

/*
 * hmm_mirror_unregister() - unregister a mirror
 *
 * @mirror: mirror struct to unregister
 *
 * Stop mirroring a process address space, and cleanup.
 */
void hmm_mirror_unregister(struct hmm_mirror *mirror)
{
	bool should_unregister = false;
	struct mm_struct *mm;
	struct hmm *hmm;

	if (mirror->hmm == NULL)
		return;

	hmm = mirror->hmm;
	down_write(&hmm->mirrors_sem);
	list_del_init(&mirror->list);
	should_unregister = list_empty(&hmm->mirrors);
	mirror->hmm = NULL;
	mm = hmm->mm;
	hmm->mm = NULL;
	up_write(&hmm->mirrors_sem);

	if (!should_unregister || mm == NULL)
		return;

	mmu_notifier_unregister_no_release(&hmm->mmu_notifier, mm);

	spin_lock(&mm->page_table_lock);
	if (mm->hmm == hmm)
		mm->hmm = NULL;
	spin_unlock(&mm->page_table_lock);

	kfree(hmm);
}
EXPORT_SYMBOL(hmm_mirror_unregister);

struct hmm_vma_walk {
	struct hmm_range	*range;
	unsigned long		last;
	bool			fault;
	bool			block;
};

static int hmm_vma_do_fault(struct mm_walk *walk, unsigned long addr,
			    bool write_fault, uint64_t *pfn)
{
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_REMOTE;
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	vm_fault_t ret;

	flags |= hmm_vma_walk->block ? 0 : FAULT_FLAG_ALLOW_RETRY;
	flags |= write_fault ? FAULT_FLAG_WRITE : 0;
	ret = handle_mm_fault(vma, addr, flags);
	if (ret & VM_FAULT_RETRY)
		return -EBUSY;
	if (ret & VM_FAULT_ERROR) {
		*pfn = range->values[HMM_PFN_ERROR];
		return -EFAULT;
	}

	return -EAGAIN;
}

static int hmm_pfns_bad(unsigned long addr,
			unsigned long end,
			struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	uint64_t *pfns = range->pfns;
	unsigned long i;

	i = (addr - range->start) >> PAGE_SHIFT;
	for (; addr < end; addr += PAGE_SIZE, i++)
		pfns[i] = range->values[HMM_PFN_ERROR];

	return 0;
}

/*
 * hmm_vma_walk_hole() - handle a range lacking valid pmd or pte(s)
 * @start: range virtual start address (inclusive)
 * @end: range virtual end address (exclusive)
 * @fault: should we fault or not ?
 * @write_fault: write fault ?
 * @walk: mm_walk structure
 * Returns: 0 on success, -EAGAIN after page fault, or page fault error
 *
 * This function will be called whenever pmd_none() or pte_none() returns true,
 * or whenever there is no page directory covering the virtual address range.
 */
static int hmm_vma_walk_hole_(unsigned long addr, unsigned long end,
			      bool fault, bool write_fault,
			      struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	uint64_t *pfns = range->pfns;
	unsigned long i;

	hmm_vma_walk->last = addr;
	i = (addr - range->start) >> PAGE_SHIFT;
	for (; addr < end; addr += PAGE_SIZE, i++) {
		pfns[i] = range->values[HMM_PFN_NONE];
		if (fault || write_fault) {
			int ret;

			ret = hmm_vma_do_fault(walk, addr, write_fault,
					       &pfns[i]);
			if (ret != -EAGAIN)
				return ret;
		}
	}

	return (fault || write_fault) ? -EAGAIN : 0;
}

static inline void hmm_pte_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
				      uint64_t pfns, uint64_t cpu_flags,
				      bool *fault, bool *write_fault)
{
	struct hmm_range *range = hmm_vma_walk->range;

	*fault = *write_fault = false;
	if (!hmm_vma_walk->fault)
		return;

	/* We aren't asked to do anything ... */
	if (!(pfns & range->flags[HMM_PFN_VALID]))
		return;
	/* If this is device memory then only fault if explicitly requested */
	if ((cpu_flags & range->flags[HMM_PFN_DEVICE_PRIVATE])) {
		/* Do we fault on device memory ? */
		if (pfns & range->flags[HMM_PFN_DEVICE_PRIVATE]) {
			*write_fault = pfns & range->flags[HMM_PFN_WRITE];
			*fault = true;
		}
		return;
	}

	/* If CPU page table is not valid then we need to fault */
	*fault = !(cpu_flags & range->flags[HMM_PFN_VALID]);
	/* Need to write fault ? */
	if ((pfns & range->flags[HMM_PFN_WRITE]) &&
	    !(cpu_flags & range->flags[HMM_PFN_WRITE])) {
		*write_fault = true;
		*fault = true;
	}
}
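
/*
 * Illustrative sketch (hypothetical values): range->flags and range->values
 * are driver provided arrays describing how HMM pfn entries are encoded. The
 * indices (HMM_PFN_VALID, HMM_PFN_WRITE, HMM_PFN_DEVICE_PRIVATE,
 * HMM_PFN_ERROR, HMM_PFN_NONE, HMM_PFN_SPECIAL) come from include/linux/hmm.h;
 * the bit values below are only an example of what a driver might pick.
 *
 *   static const uint64_t my_range_flags[] = {
 *       [HMM_PFN_VALID]          = 1UL << 0,
 *       [HMM_PFN_WRITE]          = 1UL << 1,
 *       [HMM_PFN_DEVICE_PRIVATE] = 1UL << 2,
 *   };
 *   static const uint64_t my_range_values[] = {
 *       [HMM_PFN_ERROR]   = 1UL << 3,
 *       [HMM_PFN_NONE]    = 0,
 *       [HMM_PFN_SPECIAL] = 1UL << 4,
 *   };
 *
 *   range->flags = my_range_flags;
 *   range->values = my_range_values;
 */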
static void hmm_range_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
				 const uint64_t *pfns, unsigned long npages,
				 uint64_t cpu_flags, bool *fault,
				 bool *write_fault)
{
	unsigned long i;

	if (!hmm_vma_walk->fault) {
		*fault = *write_fault = false;
		return;
	}

	for (i = 0; i < npages; ++i) {
		hmm_pte_need_fault(hmm_vma_walk, pfns[i], cpu_flags,
				   fault, write_fault);
		if ((*fault) || (*write_fault))
			return;
	}
}

static int hmm_vma_walk_hole(unsigned long addr, unsigned long end,
			     struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	bool fault, write_fault;
	unsigned long i, npages;
	uint64_t *pfns;

	i = (addr - range->start) >> PAGE_SHIFT;
	npages = (end - addr) >> PAGE_SHIFT;
	pfns = &range->pfns[i];
	hmm_range_need_fault(hmm_vma_walk, pfns, npages,
			     0, &fault, &write_fault);
	return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);
}

static inline uint64_t pmd_to_hmm_pfn_flags(struct hmm_range *range, pmd_t pmd)
{
	if (pmd_protnone(pmd))
		return 0;
	return pmd_write(pmd) ? range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_WRITE] :
				range->flags[HMM_PFN_VALID];
}

static int hmm_vma_handle_pmd(struct mm_walk *walk,
			      unsigned long addr,
			      unsigned long end,
			      uint64_t *pfns,
			      pmd_t pmd)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long pfn, npages, i;
	bool fault, write_fault;
	uint64_t cpu_flags;

	npages = (end - addr) >> PAGE_SHIFT;
	cpu_flags = pmd_to_hmm_pfn_flags(range, pmd);
	hmm_range_need_fault(hmm_vma_walk, pfns, npages, cpu_flags,
			     &fault, &write_fault);

	if (pmd_protnone(pmd) || fault || write_fault)
		return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);

	pfn = pmd_pfn(pmd) + pte_index(addr);
	for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++)
		pfns[i] = hmm_pfn_from_pfn(range, pfn) | cpu_flags;
	hmm_vma_walk->last = end;
	return 0;
}

static inline uint64_t pte_to_hmm_pfn_flags(struct hmm_range *range, pte_t pte)
{
	if (pte_none(pte) || !pte_present(pte))
		return 0;
	return pte_write(pte) ? range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_WRITE] :
				range->flags[HMM_PFN_VALID];
}
static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
			      unsigned long end, pmd_t *pmdp, pte_t *ptep,
			      uint64_t *pfn)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	bool fault, write_fault;
	uint64_t cpu_flags;
	pte_t pte = *ptep;
	uint64_t orig_pfn = *pfn;

	*pfn = range->values[HMM_PFN_NONE];
	cpu_flags = pte_to_hmm_pfn_flags(range, pte);
	hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
			   &fault, &write_fault);

	if (pte_none(pte)) {
		if (fault || write_fault)
			goto fault;
		return 0;
	}

	if (!pte_present(pte)) {
		swp_entry_t entry = pte_to_swp_entry(pte);

		if (!non_swap_entry(entry)) {
			if (fault || write_fault)
				goto fault;
			return 0;
		}

		/*
		 * This is a special swap entry, ignore migration, use
		 * device and report anything else as error.
		 */
		if (is_device_private_entry(entry)) {
			cpu_flags = range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_DEVICE_PRIVATE];
			cpu_flags |= is_write_device_private_entry(entry) ?
				range->flags[HMM_PFN_WRITE] : 0;
			hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
					   &fault, &write_fault);
			if (fault || write_fault)
				goto fault;
			*pfn = hmm_pfn_from_pfn(range, swp_offset(entry));
			*pfn |= cpu_flags;
			return 0;
		}

		if (is_migration_entry(entry)) {
			if (fault || write_fault) {
				pte_unmap(ptep);
				hmm_vma_walk->last = addr;
				migration_entry_wait(vma->vm_mm,
						     pmdp, addr);
				return -EAGAIN;
			}
			return 0;
		}

		/* Report error for everything else */
		*pfn = range->values[HMM_PFN_ERROR];
		return -EFAULT;
	}

	if (fault || write_fault)
		goto fault;

	*pfn = hmm_pfn_from_pfn(range, pte_pfn(pte)) | cpu_flags;
	return 0;

fault:
	pte_unmap(ptep);
	/* Fault any virtual address we were asked to fault */
	return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);
}
static int hmm_vma_walk_pmd(pmd_t *pmdp,
			    unsigned long start,
			    unsigned long end,
			    struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	uint64_t *pfns = range->pfns;
	unsigned long addr = start, i;
	pte_t *ptep;
	pmd_t pmd;

again:
	pmd = READ_ONCE(*pmdp);
	if (pmd_none(pmd))
		return hmm_vma_walk_hole(start, end, walk);

	if (pmd_huge(pmd) && (range->vma->vm_flags & VM_HUGETLB))
		return hmm_pfns_bad(start, end, walk);

	if (thp_migration_supported() && is_pmd_migration_entry(pmd)) {
		bool fault, write_fault;
		unsigned long npages;
		uint64_t *pfns;

		i = (addr - range->start) >> PAGE_SHIFT;
		npages = (end - addr) >> PAGE_SHIFT;
		pfns = &range->pfns[i];

		hmm_range_need_fault(hmm_vma_walk, pfns, npages,
				     0, &fault, &write_fault);
		if (fault || write_fault) {
			hmm_vma_walk->last = addr;
			pmd_migration_entry_wait(vma->vm_mm, pmdp);
			return -EAGAIN;
		}
		return 0;
	} else if (!pmd_present(pmd))
		return hmm_pfns_bad(start, end, walk);

	if (pmd_devmap(pmd) || pmd_trans_huge(pmd)) {
		/*
		 * No need to take pmd_lock here, even if some other thread
		 * is splitting the huge pmd we will get that event through
		 * mmu_notifier callback.
		 *
		 * So just read the pmd value and check again that it is a
		 * transparent huge or device mapping one and compute the
		 * corresponding pfn values.
		 */
		pmd = pmd_read_atomic(pmdp);
		barrier();
		if (!pmd_devmap(pmd) && !pmd_trans_huge(pmd))
			goto again;

		i = (addr - range->start) >> PAGE_SHIFT;
		return hmm_vma_handle_pmd(walk, addr, end, &pfns[i], pmd);
	}

	/*
	 * We have handled all the valid cases above, i.e. either none,
	 * migration, huge or transparent huge. At this point either it is a
	 * valid pmd entry pointing to a pte directory or it is a bad pmd that
	 * will not recover.
	 */
	if (pmd_bad(pmd))
		return hmm_pfns_bad(start, end, walk);

	ptep = pte_offset_map(pmdp, addr);
	i = (addr - range->start) >> PAGE_SHIFT;
	for (; addr < end; addr += PAGE_SIZE, ptep++, i++) {
		int r;

		r = hmm_vma_handle_pte(walk, addr, end, pmdp, ptep, &pfns[i]);
		if (r) {
			/* hmm_vma_handle_pte() did unmap pte directory */
			hmm_vma_walk->last = addr;
			return r;
		}
	}
	pte_unmap(ptep - 1);

	hmm_vma_walk->last = addr;
	return 0;
}
static void hmm_pfns_clear(struct hmm_range *range,
			   uint64_t *pfns,
			   unsigned long addr,
			   unsigned long end)
{
	for (; addr < end; addr += PAGE_SIZE, pfns++)
		*pfns = range->values[HMM_PFN_NONE];
}

static void hmm_pfns_special(struct hmm_range *range)
{
	unsigned long addr = range->start, i = 0;

	for (; addr < range->end; addr += PAGE_SIZE, i++)
		range->pfns[i] = range->values[HMM_PFN_SPECIAL];
}

/*
 * hmm_vma_get_pfns() - snapshot CPU page table for a range of virtual addresses
 * @range: range being snapshotted
 * Returns: -EINVAL if invalid argument, -ENOMEM out of memory, -EPERM invalid
 *          vma permission, 0 success
 *
 * This snapshots the CPU page table for a range of virtual addresses. Snapshot
 * validity is tracked by the range struct. See hmm_vma_range_done() for further
 * information.
 *
 * The range struct is initialized here. It tracks the CPU page table, but only
 * if the function returns success (0), in which case the caller must then call
 * hmm_vma_range_done() to stop CPU page table update tracking on this range.
 *
 * NOT CALLING hmm_vma_range_done() IF FUNCTION RETURNS 0 WILL LEAD TO SERIOUS
 * MEMORY CORRUPTION ! YOU HAVE BEEN WARNED !
 */
int hmm_vma_get_pfns(struct hmm_range *range)
{
	struct vm_area_struct *vma = range->vma;
	struct hmm_vma_walk hmm_vma_walk;
	struct mm_walk mm_walk;
	struct hmm *hmm;

	/* Sanity check, this really should not happen ! */
	if (range->start < vma->vm_start || range->start >= vma->vm_end)
		return -EINVAL;
	if (range->end < vma->vm_start || range->end > vma->vm_end)
		return -EINVAL;

	hmm = hmm_register(vma->vm_mm);
	if (!hmm)
		return -ENOMEM;
	/* Caller must have registered a mirror, via hmm_mirror_register() ! */
	if (!hmm->mmu_notifier.ops)
		return -EINVAL;

	/* FIXME support hugetlb fs */
	if (is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_SPECIAL) ||
			vma_is_dax(vma)) {
		hmm_pfns_special(range);
		return -EINVAL;
	}

	if (!(vma->vm_flags & VM_READ)) {
		/*
		 * If the vma does not allow read access, then assume that it
		 * does not allow write access, either. Architectures that
		 * allow write without read access are not supported by HMM,
		 * because operations such as atomic access would not work.
		 */
		hmm_pfns_clear(range, range->pfns, range->start, range->end);
		return -EPERM;
	}

	/* Initialize range to track CPU page table update */
	spin_lock(&hmm->lock);
	range->valid = true;
	list_add_rcu(&range->list, &hmm->ranges);
	spin_unlock(&hmm->lock);

	hmm_vma_walk.fault = false;
	hmm_vma_walk.range = range;
	mm_walk.private = &hmm_vma_walk;

	mm_walk.vma = vma;
	mm_walk.mm = vma->vm_mm;
	mm_walk.pte_entry = NULL;
	mm_walk.test_walk = NULL;
	mm_walk.hugetlb_entry = NULL;
	mm_walk.pmd_entry = hmm_vma_walk_pmd;
	mm_walk.pte_hole = hmm_vma_walk_hole;

	walk_page_range(range->start, range->end, &mm_walk);
	return 0;
}
EXPORT_SYMBOL(hmm_vma_get_pfns);
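
/*
 * Illustrative sketch (hypothetical names): snapshotting part of one vma with
 * a caller allocated pfns array. The flags/values arrays are the driver's pfn
 * encoding (see the encoding example near hmm_pte_need_fault()); depending on
 * the kernel version other fields of struct hmm_range, such as the pfn shift,
 * may also need to be set (see include/linux/hmm.h).
 *
 *   struct hmm_range range = {
 *       .vma    = vma,
 *       .start  = start,
 *       .end    = end,
 *       .pfns   = pfns,           // uint64_t array, one entry per page
 *       .flags  = my_range_flags,
 *       .values = my_range_values,
 *   };
 *
 *   down_read(&mm->mmap_sem);
 *   ret = hmm_vma_get_pfns(&range);
 *   if (!ret) {
 *       // ... build the device mapping from range.pfns ...
 *       if (!hmm_vma_range_done(&range)) {
 *           // the snapshot was invalidated, take it again
 *       }
 *   }
 *   up_read(&mm->mmap_sem);
 */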

/*
 * hmm_vma_range_done() - stop tracking change to CPU page table over a range
 * @range: range being tracked
 * Returns: false if range data has been invalidated, true otherwise
 *
 * Range struct is used to track updates to the CPU page table after a call to
 * either hmm_vma_get_pfns() or hmm_vma_fault(). Once the device driver is done
 * using the data, or wants to lock updates to the data it got from those
 * functions, it must call the hmm_vma_range_done() function, which will then
 * stop tracking CPU page table updates.
 *
 * Note that device driver must still implement general CPU page table update
 * tracking either by using hmm_mirror (see hmm_mirror_register()) or by using
 * the mmu_notifier API directly.
 *
 * CPU page table update tracking done through hmm_range is only temporary and
 * to be used while trying to duplicate CPU page table contents for a range of
 * virtual addresses.
 *
 * There are two ways to use this :
 * again:
 *   hmm_vma_get_pfns(range); or hmm_vma_fault(...);
 *   trans = device_build_page_table_update_transaction(pfns);
 *   device_page_table_lock();
 *   if (!hmm_vma_range_done(range)) {
 *     device_page_table_unlock();
 *     goto again;
 *   }
 *   device_commit_transaction(trans);
 *   device_page_table_unlock();
 *
 * Or:
 *   hmm_vma_get_pfns(range); or hmm_vma_fault(...);
 *   device_page_table_lock();
 *   hmm_vma_range_done(range);
 *   device_update_page_table(range->pfns);
 *   device_page_table_unlock();
 */
bool hmm_vma_range_done(struct hmm_range *range)
{
	unsigned long npages = (range->end - range->start) >> PAGE_SHIFT;
	struct hmm *hmm;

	if (range->end <= range->start) {
		BUG();
		return false;
	}

	hmm = hmm_register(range->vma->vm_mm);
	if (!hmm) {
		memset(range->pfns, 0, sizeof(*range->pfns) * npages);
		return false;
	}

	spin_lock(&hmm->lock);
	list_del_rcu(&range->list);
	spin_unlock(&hmm->lock);

	return range->valid;
}
EXPORT_SYMBOL(hmm_vma_range_done);

/*
 * hmm_vma_fault() - try to fault some address in a virtual address range
 * @range: range being faulted
 * @block: allow blocking on fault (if true it sleeps and does not drop the
 *         mmap_sem)
 * Returns: 0 success, error otherwise (-EAGAIN means mmap_sem has been
 *          dropped)
 *
 * This is similar to a regular CPU page fault except that it will not trigger
 * any memory migration if the memory being faulted is not accessible by CPUs.
 *
 * On error, for one virtual address in the range, the function will mark the
 * corresponding HMM pfn entry with an error flag.
 *
 * Expected use pattern:
 * retry:
 *   down_read(&mm->mmap_sem);
 *   // Find the vma and address the device wants to fault, initialize the
 *   // pfns array accordingly
 *   ret = hmm_vma_fault(range, block);
 *   switch (ret) {
 *   case -EAGAIN:
 *     hmm_vma_range_done(range);
 *     // You might want to rate limit or yield to play nicely, you may
 *     // also commit any valid pfn in the array assuming that you are
 *     // getting true from hmm_vma_range_done()
 *     goto retry;
 *   case 0:
 *     break;
 *   case -ENOMEM:
 *   case -EINVAL:
 *   case -EPERM:
 *   default:
 *     // Handle error !
 *     up_read(&mm->mmap_sem)
 *     return;
 *   }
 *   // Take device driver lock that serializes device page table update
 *   driver_lock_device_page_table_update();
 *   hmm_vma_range_done(range);
 *   // Commit pfns we got from hmm_vma_fault()
 *   driver_unlock_device_page_table_update();
 *   up_read(&mm->mmap_sem)
 *
 * YOU MUST CALL hmm_vma_range_done() AFTER THIS FUNCTION RETURNS SUCCESS (0)
 * BEFORE FREEING THE range struct OR YOU WILL HAVE SERIOUS MEMORY CORRUPTION !
 *
 * YOU HAVE BEEN WARNED !
 */
int hmm_vma_fault(struct hmm_range *range, bool block)
{
	struct vm_area_struct *vma = range->vma;
	unsigned long start = range->start;
	struct hmm_vma_walk hmm_vma_walk;
	struct mm_walk mm_walk;
	struct hmm *hmm;
	int ret;

	/* Sanity check, this really should not happen ! */
	if (range->start < vma->vm_start || range->start >= vma->vm_end)
		return -EINVAL;
	if (range->end < vma->vm_start || range->end > vma->vm_end)
		return -EINVAL;

	hmm = hmm_register(vma->vm_mm);
	if (!hmm) {
		hmm_pfns_clear(range, range->pfns, range->start, range->end);
		return -ENOMEM;
	}
	/* Caller must have registered a mirror using hmm_mirror_register() */
	if (!hmm->mmu_notifier.ops)
		return -EINVAL;

	/* FIXME support hugetlb fs */
	if (is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_SPECIAL) ||
			vma_is_dax(vma)) {
		hmm_pfns_special(range);
		return -EINVAL;
	}

	if (!(vma->vm_flags & VM_READ)) {
		/*
		 * If the vma does not allow read access, then assume that it
		 * does not allow write access, either. Architectures that
		 * allow write without read access are not supported by HMM,
		 * because operations such as atomic access would not work.
		 */
		hmm_pfns_clear(range, range->pfns, range->start, range->end);
		return -EPERM;
	}

	/* Initialize range to track CPU page table update */
	spin_lock(&hmm->lock);
	range->valid = true;
	list_add_rcu(&range->list, &hmm->ranges);
	spin_unlock(&hmm->lock);

	hmm_vma_walk.fault = true;
	hmm_vma_walk.block = block;
	hmm_vma_walk.range = range;
	mm_walk.private = &hmm_vma_walk;
	hmm_vma_walk.last = range->start;

	mm_walk.vma = vma;
	mm_walk.mm = vma->vm_mm;
	mm_walk.pte_entry = NULL;
	mm_walk.test_walk = NULL;
	mm_walk.hugetlb_entry = NULL;
	mm_walk.pmd_entry = hmm_vma_walk_pmd;
	mm_walk.pte_hole = hmm_vma_walk_hole;

	do {
		ret = walk_page_range(start, range->end, &mm_walk);
		start = hmm_vma_walk.last;
	} while (ret == -EAGAIN);

	if (ret) {
		unsigned long i;

		i = (hmm_vma_walk.last - range->start) >> PAGE_SHIFT;
		hmm_pfns_clear(range, &range->pfns[i], hmm_vma_walk.last,
			       range->end);
		hmm_vma_range_done(range);
	}
	return ret;
}
EXPORT_SYMBOL(hmm_vma_fault);
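
/*
 * Note on the fault request encoding (illustrative): the pfns array passed in
 * through the range doubles as the fault request. Before calling
 * hmm_vma_fault(), the driver sets range->flags[HMM_PFN_VALID] (and
 * range->flags[HMM_PFN_WRITE] when it needs write access) in every entry it
 * wants faulted; hmm_pte_need_fault() reads those bits back to decide whether
 * to call handle_mm_fault(). For instance, to request a writable mapping of
 * every page in the range:
 *
 *   for (i = 0; i < npages; ++i)
 *       range->pfns[i] = range->flags[HMM_PFN_VALID] |
 *                        range->flags[HMM_PFN_WRITE];
 */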
#endif /* IS_ENABLED(CONFIG_HMM_MIRROR) */


#if IS_ENABLED(CONFIG_DEVICE_PRIVATE) || IS_ENABLED(CONFIG_DEVICE_PUBLIC)
struct page *hmm_vma_alloc_locked_page(struct vm_area_struct *vma,
				       unsigned long addr)
{
	struct page *page;

	page = alloc_page_vma(GFP_HIGHUSER, vma, addr);
	if (!page)
		return NULL;
	lock_page(page);
	return page;
}
EXPORT_SYMBOL(hmm_vma_alloc_locked_page);


static void hmm_devmem_ref_release(struct percpu_ref *ref)
{
	struct hmm_devmem *devmem;

	devmem = container_of(ref, struct hmm_devmem, ref);
	complete(&devmem->completion);
}

static void hmm_devmem_ref_exit(void *data)
{
	struct percpu_ref *ref = data;
	struct hmm_devmem *devmem;

	devmem = container_of(ref, struct hmm_devmem, ref);
	wait_for_completion(&devmem->completion);
	percpu_ref_exit(ref);
}

static void hmm_devmem_ref_kill(struct percpu_ref *ref)
{
	percpu_ref_kill(ref);
}

static int hmm_devmem_fault(struct vm_area_struct *vma,
			    unsigned long addr,
			    const struct page *page,
			    unsigned int flags,
			    pmd_t *pmdp)
{
	struct hmm_devmem *devmem = page->pgmap->data;

	return devmem->ops->fault(devmem, vma, addr, page, flags, pmdp);
}

static void hmm_devmem_free(struct page *page, void *data)
{
	struct hmm_devmem *devmem = data;

	page->mapping = NULL;

	devmem->ops->free(devmem, page);
}

/*
 * hmm_devmem_add() - hotplug ZONE_DEVICE memory for device memory
 *
 * @ops: memory event device driver callback (see struct hmm_devmem_ops)
 * @device: device struct to bind the resource to
 * @size: size in bytes of the device memory to add
 * Returns: pointer to new hmm_devmem struct, ERR_PTR otherwise
 *
 * This function first finds an empty range of physical addresses big enough to
 * contain the new resource, and then hotplugs it as ZONE_DEVICE memory, which
 * in turn allocates struct pages. It does not do anything beyond that; all
 * events affecting the memory will go through the various callbacks provided
 * by the hmm_devmem_ops struct.
 *
 * Device driver should call this function during device initialization and
 * is then responsible for memory management. HMM only provides helpers.
 */
struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops,
				  struct device *device,
				  unsigned long size)
{
	struct hmm_devmem *devmem;
	resource_size_t addr;
	void *result;
	int ret;

	dev_pagemap_get_ops();

	devmem = devm_kzalloc(device, sizeof(*devmem), GFP_KERNEL);
	if (!devmem)
		return ERR_PTR(-ENOMEM);

	init_completion(&devmem->completion);
	devmem->pfn_first = -1UL;
	devmem->pfn_last = -1UL;
	devmem->resource = NULL;
	devmem->device = device;
	devmem->ops = ops;

	ret = percpu_ref_init(&devmem->ref, &hmm_devmem_ref_release,
			      0, GFP_KERNEL);
	if (ret)
		return ERR_PTR(ret);

	ret = devm_add_action_or_reset(device, hmm_devmem_ref_exit, &devmem->ref);
	if (ret)
		return ERR_PTR(ret);

	size = ALIGN(size, PA_SECTION_SIZE);
	addr = min((unsigned long)iomem_resource.end,
		   (1UL << MAX_PHYSMEM_BITS) - 1);
	addr = addr - size + 1UL;

	/*
	 * FIXME add a new helper to quickly walk resource tree and find free
	 * range
	 *
	 * FIXME what about ioport_resource resource ?
	 */
	for (; addr > size && addr >= iomem_resource.start; addr -= size) {
		ret = region_intersects(addr, size, 0, IORES_DESC_NONE);
		if (ret != REGION_DISJOINT)
			continue;

		devmem->resource = devm_request_mem_region(device, addr, size,
							   dev_name(device));
		if (!devmem->resource)
			return ERR_PTR(-ENOMEM);
		break;
	}
	if (!devmem->resource)
		return ERR_PTR(-ERANGE);

	devmem->resource->desc = IORES_DESC_DEVICE_PRIVATE_MEMORY;
	devmem->pfn_first = devmem->resource->start >> PAGE_SHIFT;
	devmem->pfn_last = devmem->pfn_first +
			   (resource_size(devmem->resource) >> PAGE_SHIFT);
	devmem->page_fault = hmm_devmem_fault;

	devmem->pagemap.type = MEMORY_DEVICE_PRIVATE;
	devmem->pagemap.res = *devmem->resource;
	devmem->pagemap.page_free = hmm_devmem_free;
	devmem->pagemap.altmap_valid = false;
	devmem->pagemap.ref = &devmem->ref;
	devmem->pagemap.data = devmem;
	devmem->pagemap.kill = hmm_devmem_ref_kill;

	result = devm_memremap_pages(devmem->device, &devmem->pagemap);
	if (IS_ERR(result))
		return result;
	return devmem;
}
EXPORT_SYMBOL_GPL(hmm_devmem_add);
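
/*
 * Illustrative sketch (hypothetical driver names): the driver supplies the
 * hmm_devmem_ops callbacks that hmm_devmem_fault() and hmm_devmem_free() above
 * forward to (the exact prototypes are declared in include/linux/hmm.h), then
 * hotplugs its memory from its probe routine. MY_DEVMEM_SIZE and the my_*
 * symbols are assumptions, not part of this file.
 *
 *   static const struct hmm_devmem_ops my_devmem_ops = {
 *       .fault = my_devmem_fault,   // migrate the page back to system
 *                                   // memory so the CPU fault can succeed
 *       .free  = my_devmem_free,    // reclaim the device memory block
 *   };
 *
 *   devmem = hmm_devmem_add(&my_devmem_ops, &pdev->dev, MY_DEVMEM_SIZE);
 *   if (IS_ERR(devmem))
 *       return PTR_ERR(devmem);
 *   // struct pages now back pfns [devmem->pfn_first, devmem->pfn_last)
 */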

struct hmm_devmem *hmm_devmem_add_resource(const struct hmm_devmem_ops *ops,
					   struct device *device,
					   struct resource *res)
{
	struct hmm_devmem *devmem;
	void *result;
	int ret;

	if (res->desc != IORES_DESC_DEVICE_PUBLIC_MEMORY)
		return ERR_PTR(-EINVAL);

	dev_pagemap_get_ops();

	devmem = devm_kzalloc(device, sizeof(*devmem), GFP_KERNEL);
	if (!devmem)
		return ERR_PTR(-ENOMEM);

	init_completion(&devmem->completion);
	devmem->pfn_first = -1UL;
	devmem->pfn_last = -1UL;
	devmem->resource = res;
	devmem->device = device;
	devmem->ops = ops;

	ret = percpu_ref_init(&devmem->ref, &hmm_devmem_ref_release,
			      0, GFP_KERNEL);
	if (ret)
		return ERR_PTR(ret);

	ret = devm_add_action_or_reset(device, hmm_devmem_ref_exit,
				       &devmem->ref);
	if (ret)
		return ERR_PTR(ret);

	devmem->pfn_first = devmem->resource->start >> PAGE_SHIFT;
	devmem->pfn_last = devmem->pfn_first +
			   (resource_size(devmem->resource) >> PAGE_SHIFT);
	devmem->page_fault = hmm_devmem_fault;

	devmem->pagemap.type = MEMORY_DEVICE_PUBLIC;
	devmem->pagemap.res = *devmem->resource;
	devmem->pagemap.page_free = hmm_devmem_free;
	devmem->pagemap.altmap_valid = false;
	devmem->pagemap.ref = &devmem->ref;
	devmem->pagemap.data = devmem;
	devmem->pagemap.kill = hmm_devmem_ref_kill;

	result = devm_memremap_pages(devmem->device, &devmem->pagemap);
	if (IS_ERR(result))
		return result;
	return devmem;
}
EXPORT_SYMBOL_GPL(hmm_devmem_add_resource);

/*
 * A device driver that wants to handle memory for multiple devices through a
 * single fake device can use hmm_device to do so. This is purely a helper and
 * it is not needed to make use of any HMM functionality.
 */
#define HMM_DEVICE_MAX 256

static DECLARE_BITMAP(hmm_device_mask, HMM_DEVICE_MAX);
static DEFINE_SPINLOCK(hmm_device_lock);
static struct class *hmm_device_class;
static dev_t hmm_device_devt;

static void hmm_device_release(struct device *device)
{
	struct hmm_device *hmm_device;

	hmm_device = container_of(device, struct hmm_device, device);
	spin_lock(&hmm_device_lock);
	clear_bit(hmm_device->minor, hmm_device_mask);
	spin_unlock(&hmm_device_lock);

	kfree(hmm_device);
}

struct hmm_device *hmm_device_new(void *drvdata)
{
	struct hmm_device *hmm_device;

	hmm_device = kzalloc(sizeof(*hmm_device), GFP_KERNEL);
	if (!hmm_device)
		return ERR_PTR(-ENOMEM);

	spin_lock(&hmm_device_lock);
	hmm_device->minor = find_first_zero_bit(hmm_device_mask, HMM_DEVICE_MAX);
	if (hmm_device->minor >= HMM_DEVICE_MAX) {
		spin_unlock(&hmm_device_lock);
		kfree(hmm_device);
		return ERR_PTR(-EBUSY);
	}
	set_bit(hmm_device->minor, hmm_device_mask);
	spin_unlock(&hmm_device_lock);

	dev_set_name(&hmm_device->device, "hmm_device%d", hmm_device->minor);
	hmm_device->device.devt = MKDEV(MAJOR(hmm_device_devt),
					hmm_device->minor);
	hmm_device->device.release = hmm_device_release;
	dev_set_drvdata(&hmm_device->device, drvdata);
	hmm_device->device.class = hmm_device_class;
	device_initialize(&hmm_device->device);

	return hmm_device;
}
EXPORT_SYMBOL(hmm_device_new);

void hmm_device_put(struct hmm_device *hmm_device)
{
	put_device(&hmm_device->device);
}
EXPORT_SYMBOL(hmm_device_put);
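
/*
 * Illustrative sketch (hypothetical names): create one fake device, pass its
 * embedded struct device wherever a struct device is expected (for example as
 * the @device argument of hmm_devmem_add()), and drop the reference when done.
 *
 *   hmm_device = hmm_device_new(my_driver_private);
 *   if (IS_ERR(hmm_device))
 *       return PTR_ERR(hmm_device);
 *   devmem = hmm_devmem_add(&my_devmem_ops, &hmm_device->device, size);
 *   ...
 *   hmm_device_put(hmm_device);
 */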

static int __init hmm_init(void)
{
	int ret;

	ret = alloc_chrdev_region(&hmm_device_devt, 0,
				  HMM_DEVICE_MAX,
				  "hmm_device");
	if (ret)
		return ret;

	hmm_device_class = class_create(THIS_MODULE, "hmm_device");
	if (IS_ERR(hmm_device_class)) {
		unregister_chrdev_region(hmm_device_devt, HMM_DEVICE_MAX);
		return PTR_ERR(hmm_device_class);
	}
	return 0;
}

device_initcall(hmm_init);
#endif /* CONFIG_DEVICE_PRIVATE || CONFIG_DEVICE_PUBLIC */