/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */
/**
 * DOC: MMU Notifier
 *
 * For coherent userptr handling, an MMU notifier is registered to inform the
 * driver about updates to the page tables of a process.
 *
 * When somebody tries to invalidate the page tables, we block the update
 * until all operations on the pages in question are completed; then those
 * pages are marked as accessed, and also as dirty if the access was not
 * read only.
 *
 * New command submissions using the userptrs in question are delayed until
 * all page table invalidations are completed and we once more see a coherent
 * process address space.
 */
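/*
 * Rough life cycle (summarized from the code below): amdgpu_mn_register()
 * attaches a userptr BO to an HMM mirror context, the
 * amdgpu_mn_sync_pagetables_*() callbacks react to CPU page table
 * invalidations, and amdgpu_mn_unregister() detaches the BO again.
 */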
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/mmu_notifier.h>
#include <linux/hmm.h>
#include <linux/interval_tree.h>

#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
/**
 * struct amdgpu_mn_node
 *
 * @it: interval node defining start-last of the affected address range
 * @bos: list of all BOs in the affected address range
 *
 * Manages all BOs which are affected by a certain range of address space.
 */
struct amdgpu_mn_node {
	struct interval_tree_node	it;
	struct list_head		bos;
};
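/*
 * Illustration (hypothetical addresses): two userptr BOs covering
 * [0x1000, 0x1fff] and [0x1800, 0x2fff] end up on the bos list of a single
 * node whose interval is [0x1000, 0x2fff], because amdgpu_mn_register()
 * below merges overlapping ranges into one node.
 */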
/**
 * amdgpu_mn_destroy - destroy the HMM mirror
 *
 * @work: previously scheduled work item
 *
 * Lazily destroys the notifier from a work item.
 */
static void amdgpu_mn_destroy(struct work_struct *work)
{
	struct amdgpu_mn *amn = container_of(work, struct amdgpu_mn, work);
	struct amdgpu_device *adev = amn->adev;
	struct amdgpu_mn_node *node, *next_node;
	struct amdgpu_bo *bo, *next_bo;

	mutex_lock(&adev->mn_lock);
	down_write(&amn->lock);
	hash_del(&amn->node);
	rbtree_postorder_for_each_entry_safe(node, next_node,
					     &amn->objects.rb_root, it.rb) {
		list_for_each_entry_safe(bo, next_bo, &node->bos, mn_list) {
			bo->mn = NULL;
			list_del_init(&bo->mn_list);
		}
		kfree(node);
	}
	up_write(&amn->lock);
	mutex_unlock(&adev->mn_lock);

	hmm_mirror_unregister(&amn->mirror);
	kfree(amn);
}
/**
 * amdgpu_hmm_mirror_release - callback to notify about mm destruction
 *
 * @mirror: the HMM mirror (mm) this callback is about
 *
 * Schedule a work item to lazily destroy the HMM mirror.
 */
static void amdgpu_hmm_mirror_release(struct hmm_mirror *mirror)
{
	struct amdgpu_mn *amn = container_of(mirror, struct amdgpu_mn, mirror);

	INIT_WORK(&amn->work, amdgpu_mn_destroy);
	schedule_work(&amn->work);
}
/**
 * amdgpu_mn_lock - take the write side lock for this notifier
 */
void amdgpu_mn_lock(struct amdgpu_mn *mn)
{
	if (mn)
		down_write(&mn->lock);
}

/**
 * amdgpu_mn_unlock - drop the write side lock for this notifier
 */
void amdgpu_mn_unlock(struct amdgpu_mn *mn)
{
	if (mn)
		up_write(&mn->lock);
}

/**
 * amdgpu_mn_read_lock - take the read side lock for this notifier
 */
static int amdgpu_mn_read_lock(struct amdgpu_mn *amn, bool blockable)
{
	if (blockable)
		down_read(&amn->lock);
	else if (!down_read_trylock(&amn->lock))
		return -EAGAIN;

	return 0;
}

/**
 * amdgpu_mn_read_unlock - drop the read side lock for this notifier
 */
static void amdgpu_mn_read_unlock(struct amdgpu_mn *amn)
{
	up_read(&amn->lock);
}
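/*
 * Note on the lock split: the write side (amdgpu_mn_lock()/amdgpu_mn_unlock())
 * is exported for callers outside this file, presumably command submission,
 * so the address space stays stable while a job is committed. The
 * invalidation callbacks below only need the read side, so multiple
 * invalidations may run concurrently with each other but not with a
 * write-side holder.
 */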
/**
 * amdgpu_mn_invalidate_node - unmap all BOs of a node
 *
 * @node: the node with the BOs to unmap
 * @start: start of address range affected
 * @end: end of address range affected
 *
 * Block for operations on BOs to finish and mark pages as accessed and
 * dirty.
 */
static void amdgpu_mn_invalidate_node(struct amdgpu_mn_node *node,
				      unsigned long start,
				      unsigned long end)
{
	struct amdgpu_bo *bo;
	long r;

	list_for_each_entry(bo, &node->bos, mn_list) {
		if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, start, end))
			continue;

		r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv,
					      true, false, MAX_SCHEDULE_TIMEOUT);
		if (r <= 0)
			DRM_ERROR("(%ld) failed to wait for user bo\n", r);
	}
}
/**
 * amdgpu_mn_sync_pagetables_gfx - callback to notify about mm change
 *
 * @mirror: the hmm_mirror (mm) that is about to be updated
 * @update: the start and end address of the update
 *
 * Block for operations on BOs to finish and mark pages as accessed and
 * dirty.
 */
static int
amdgpu_mn_sync_pagetables_gfx(struct hmm_mirror *mirror,
			      const struct mmu_notifier_range *update)
{
	struct amdgpu_mn *amn = container_of(mirror, struct amdgpu_mn, mirror);
	unsigned long start = update->start;
	unsigned long end = update->end;
	bool blockable = mmu_notifier_range_blockable(update);
	struct interval_tree_node *it;

	/* notification is exclusive, but interval is inclusive */
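	/*
	 * Hypothetical example: an invalidation of [0x1000, 0x3000) becomes
	 * the inclusive interval [0x1000, 0x2fff], matching the semantics of
	 * the interval_tree_iter_*() lookups below.
	 */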
	end -= 1;

	/* TODO we should be able to split locking for interval tree and
	 * amdgpu_mn_invalidate_node
	 */
	if (amdgpu_mn_read_lock(amn, blockable))
		return -EAGAIN;

	it = interval_tree_iter_first(&amn->objects, start, end);
	while (it) {
		struct amdgpu_mn_node *node;

		if (!blockable) {
			amdgpu_mn_read_unlock(amn);
			return -EAGAIN;
		}

		node = container_of(it, struct amdgpu_mn_node, it);
		it = interval_tree_iter_next(it, start, end);

		amdgpu_mn_invalidate_node(node, start, end);
	}

	amdgpu_mn_read_unlock(amn);

	return 0;
}
/**
 * amdgpu_mn_sync_pagetables_hsa - callback to notify about mm change
 *
 * @mirror: the hmm_mirror (mm) that is about to be updated
 * @update: the start and end address of the update
 *
 * We temporarily evict all BOs between start and end. This
 * necessitates evicting all user-mode queues of the process. The BOs
 * are restored in amdgpu_mn_invalidate_range_end_hsa.
 */
static int
amdgpu_mn_sync_pagetables_hsa(struct hmm_mirror *mirror,
			      const struct mmu_notifier_range *update)
{
	struct amdgpu_mn *amn = container_of(mirror, struct amdgpu_mn, mirror);
	unsigned long start = update->start;
	unsigned long end = update->end;
	bool blockable = mmu_notifier_range_blockable(update);
	struct interval_tree_node *it;

	/* notification is exclusive, but interval is inclusive */
	end -= 1;

	if (amdgpu_mn_read_lock(amn, blockable))
		return -EAGAIN;

	it = interval_tree_iter_first(&amn->objects, start, end);
	while (it) {
		struct amdgpu_mn_node *node;
		struct amdgpu_bo *bo;

		if (!blockable) {
			amdgpu_mn_read_unlock(amn);
			return -EAGAIN;
		}

		node = container_of(it, struct amdgpu_mn_node, it);
		it = interval_tree_iter_next(it, start, end);

		list_for_each_entry(bo, &node->bos, mn_list) {
			struct kgd_mem *mem = bo->kfd_bo;

			if (amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm,
							 start, end))
				amdgpu_amdkfd_evict_userptr(mem, amn->mm);
		}
	}

	amdgpu_mn_read_unlock(amn);

	return 0;
}
/* Low bits of any reasonable mm pointer will be unused due to struct
 * alignment. Use these bits to make a unique key from the mm pointer
 * and notifier type.
 */
#define AMDGPU_MN_KEY(mm, type) ((unsigned long)(mm) + (type))
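/*
 * Worked example (hypothetical pointer value): with mm == 0xffff888812345000
 * and type == AMDGPU_MN_TYPE_HSA (assumed here to be 1), the key is
 * 0xffff888812345001, while the GFX context for the same mm would get
 * 0xffff888812345000, so the two contexts hash under distinct keys.
 */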
static struct hmm_mirror_ops amdgpu_hmm_mirror_ops[] = {
	[AMDGPU_MN_TYPE_GFX] = {
		.sync_cpu_device_pagetables = amdgpu_mn_sync_pagetables_gfx,
		.release = amdgpu_hmm_mirror_release
	},
	[AMDGPU_MN_TYPE_HSA] = {
		.sync_cpu_device_pagetables = amdgpu_mn_sync_pagetables_hsa,
		.release = amdgpu_hmm_mirror_release
	},
};
/**
 * amdgpu_mn_get - create HMM mirror context
 *
 * @adev: amdgpu device pointer
 * @type: type of MMU notifier context
 *
 * Creates an HMM mirror context for current->mm.
 */
struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev,
				enum amdgpu_mn_type type)
{
	struct mm_struct *mm = current->mm;
	struct amdgpu_mn *amn;
	unsigned long key = AMDGPU_MN_KEY(mm, type);
	int r;

	mutex_lock(&adev->mn_lock);
	if (down_write_killable(&mm->mmap_sem)) {
		mutex_unlock(&adev->mn_lock);
		return ERR_PTR(-EINTR);
	}

	hash_for_each_possible(adev->mn_hash, amn, node, key)
		if (AMDGPU_MN_KEY(amn->mm, amn->type) == key)
			goto release_locks;

	amn = kzalloc(sizeof(*amn), GFP_KERNEL);
	if (!amn) {
		amn = ERR_PTR(-ENOMEM);
		goto release_locks;
	}

	amn->adev = adev;
	amn->mm = mm;
	init_rwsem(&amn->lock);
	amn->type = type;
	amn->objects = RB_ROOT_CACHED;

	amn->mirror.ops = &amdgpu_hmm_mirror_ops[type];
	r = hmm_mirror_register(&amn->mirror, mm);
	if (r)
		goto free_amn;

	hash_add(adev->mn_hash, &amn->node, AMDGPU_MN_KEY(mm, type));

release_locks:
	up_write(&mm->mmap_sem);
	mutex_unlock(&adev->mn_lock);

	return amn;

free_amn:
	up_write(&mm->mmap_sem);
	mutex_unlock(&adev->mn_lock);
	kfree(amn);

	return ERR_PTR(r);
}
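/*
 * Because the context is looked up by AMDGPU_MN_KEY(mm, type) in
 * adev->mn_hash, all BOs of one process and notifier type share a single
 * amdgpu_mn / hmm_mirror instance.
 */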
/**
 * amdgpu_mn_register - register a BO for notifier updates
 *
 * @bo: amdgpu buffer object
 * @addr: userptr addr we should monitor
 *
 * Registers an HMM mirror for the given BO at the specified address.
 * Returns 0 on success, -ERRNO if anything goes wrong.
 */
int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
{
	unsigned long end = addr + amdgpu_bo_size(bo) - 1;
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	enum amdgpu_mn_type type =
		bo->kfd_bo ? AMDGPU_MN_TYPE_HSA : AMDGPU_MN_TYPE_GFX;
	struct amdgpu_mn *amn;
	struct amdgpu_mn_node *node = NULL, *new_node;
	struct list_head bos;
	struct interval_tree_node *it;

	amn = amdgpu_mn_get(adev, type);
	if (IS_ERR(amn))
		return PTR_ERR(amn);

	new_node = kmalloc(sizeof(*new_node), GFP_KERNEL);
	if (!new_node)
		return -ENOMEM;

	INIT_LIST_HEAD(&bos);

	down_write(&amn->lock);
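	/*
	 * The loop below pulls every node that overlaps [addr, end] out of
	 * the interval tree, widens the range to cover it and collects its
	 * BOs, so that a single merged node can be re-inserted afterwards.
	 */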
	while ((it = interval_tree_iter_first(&amn->objects, addr, end))) {
		kfree(node);
		node = container_of(it, struct amdgpu_mn_node, it);
		interval_tree_remove(&node->it, &amn->objects);
		addr = min(it->start, addr);
		end = max(it->last, end);
		list_splice(&node->bos, &bos);
	}

	if (!node)
		node = new_node;
	else
		kfree(new_node);

	bo->mn = amn;

	node->it.start = addr;
	node->it.last = end;
	INIT_LIST_HEAD(&node->bos);
	list_splice(&bos, &node->bos);
	list_add(&bo->mn_list, &node->bos);

	interval_tree_insert(&node->it, &amn->objects);

	up_write(&amn->lock);

	return 0;
}
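/*
 * Usage sketch (the real call sites live outside this file, shown only as an
 * illustration): the GEM userptr path is expected to call
 * amdgpu_mn_register(bo, args->addr) when a userptr BO asks for notifier
 * registration, and amdgpu_mn_unregister(bo) when the BO is destroyed.
 */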
/**
 * amdgpu_mn_unregister - unregister a BO for HMM mirror updates
 *
 * @bo: amdgpu buffer object
 *
 * Remove any registration of HMM mirror updates from the buffer object.
 */
void amdgpu_mn_unregister(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct amdgpu_mn *amn;
	struct list_head *head;

	mutex_lock(&adev->mn_lock);

	amn = bo->mn;
	if (amn == NULL) {
		mutex_unlock(&adev->mn_lock);
		return;
	}

	down_write(&amn->lock);

	/* save the next list entry for later */
	head = bo->mn_list.next;

	bo->mn = NULL;
	list_del_init(&bo->mn_list);

	if (list_empty(head)) {
		struct amdgpu_mn_node *node;

		node = container_of(head, struct amdgpu_mn_node, bos);
		interval_tree_remove(&node->it, &amn->objects);
		kfree(node);
	}

	up_write(&amn->lock);
	mutex_unlock(&adev->mn_lock);
}
/* Flags used internally by HMM; not related to CPU/GPU PTE flags */
static const uint64_t hmm_range_flags[HMM_PFN_FLAG_MAX] = {
	(1 << 0), /* HMM_PFN_VALID */
	(1 << 1), /* HMM_PFN_WRITE */
	0 /* HMM_PFN_DEVICE_PRIVATE */
};

static const uint64_t hmm_range_values[HMM_PFN_VALUE_MAX] = {
	0xfffffffffffffffeUL, /* HMM_PFN_ERROR */
	0, /* HMM_PFN_NONE */
	0xfffffffffffffffcUL /* HMM_PFN_SPECIAL */
};
void amdgpu_hmm_init_range(struct hmm_range *range)
{
	if (range) {
		range->flags = hmm_range_flags;
		range->values = hmm_range_values;
		range->pfn_shift = PAGE_SHIFT;
	}
}
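/*
 * Usage sketch (illustrative, not taken from this file): a caller is
 * expected to initialize a struct hmm_range with this helper, fill in the
 * start/end addresses and a pfns array, and then pass it to
 * hmm_range_fault() so the returned pfns are interpreted with the flag and
 * value encodings defined above.
 */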