// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2020-2021 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/types.h>
#include <linux/sched/task.h>
#include "amdgpu_sync.h"
#include "amdgpu_object.h"
#include "amdgpu_vm.h"
#include "amdgpu_mn.h"
#include "amdgpu.h"
#include "amdgpu_xgmi.h"
#include "kfd_priv.h"
#include "kfd_svm.h"
#include "kfd_migrate.h"

#define AMDGPU_SVM_RANGE_RESTORE_DELAY_MS 1

/* Long enough to ensure no retry fault comes after svm range is restored and
 * page table is updated.
 */
#define AMDGPU_SVM_RANGE_RETRY_FAULT_PENDING	2000

static void svm_range_evict_svm_bo_worker(struct work_struct *work);
static bool
svm_range_cpu_invalidate_pagetables(struct mmu_interval_notifier *mni,
				    const struct mmu_notifier_range *range,
				    unsigned long cur_seq);

static const struct mmu_interval_notifier_ops svm_range_mn_ops = {
	.invalidate = svm_range_cpu_invalidate_pagetables,
};

/**
 * svm_range_unlink - unlink svm_range from lists and interval tree
 * @prange: svm range structure to be removed
 *
 * Remove the svm_range from the svms and svm_bo lists and the svms
 * interval tree.
 *
 * Context: The caller must hold svms->lock
 */
static void svm_range_unlink(struct svm_range *prange)
{
	pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms,
		 prange, prange->start, prange->last);

	if (prange->svm_bo) {
		spin_lock(&prange->svm_bo->list_lock);
		list_del(&prange->svm_bo_list);
		spin_unlock(&prange->svm_bo->list_lock);
	}

	list_del(&prange->list);
	if (prange->it_node.start != 0 && prange->it_node.last != 0)
		interval_tree_remove(&prange->it_node, &prange->svms->objects);
}

static void
svm_range_add_notifier_locked(struct mm_struct *mm, struct svm_range *prange)
{
	pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms,
		 prange, prange->start, prange->last);

	mmu_interval_notifier_insert_locked(&prange->notifier, mm,
				     prange->start << PAGE_SHIFT,
				     prange->npages << PAGE_SHIFT,
				     &svm_range_mn_ops);
}

/**
 * svm_range_add_to_svms - add svm range to svms
 * @prange: svm range structure to be added
 *
 * Add the svm range to svms interval tree and link list
 *
 * Context: The caller must hold svms->lock
 */
static void svm_range_add_to_svms(struct svm_range *prange)
{
	pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms,
		 prange, prange->start, prange->last);

	list_add_tail(&prange->list, &prange->svms->list);
	prange->it_node.start = prange->start;
	prange->it_node.last = prange->last;
	interval_tree_insert(&prange->it_node, &prange->svms->objects);
}

static void svm_range_remove_notifier(struct svm_range *prange)
{
	pr_debug("remove notifier svms 0x%p prange 0x%p [0x%lx 0x%lx]\n",
		 prange->svms, prange,
		 prange->notifier.interval_tree.start >> PAGE_SHIFT,
		 prange->notifier.interval_tree.last >> PAGE_SHIFT);

	if (prange->notifier.interval_tree.start != 0 &&
	    prange->notifier.interval_tree.last != 0)
		mmu_interval_notifier_remove(&prange->notifier);
}
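
/* Note on svm_range_dma_map_dev() below: system memory pages returned by
 * hmm_range_fault are DMA-mapped with dma_map_page(). VRAM (zone device)
 * pages are not DMA-mapped; instead the physical VRAM offset is stored and
 * tagged with SVM_RANGE_VRAM_DOMAIN so later mapping code can tell the two
 * address types apart.
 */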
static int
svm_range_dma_map_dev(struct amdgpu_device *adev, struct svm_range *prange,
		      unsigned long *hmm_pfns, uint32_t gpuidx)
{
	enum dma_data_direction dir = DMA_BIDIRECTIONAL;
	dma_addr_t *addr = prange->dma_addr[gpuidx];
	struct device *dev = adev->dev;
	struct page *page;
	int i, r;

	if (!addr) {
		addr = kvmalloc_array(prange->npages, sizeof(*addr),
				      GFP_KERNEL | __GFP_ZERO);
		if (!addr)
			return -ENOMEM;
		prange->dma_addr[gpuidx] = addr;
	}

	for (i = 0; i < prange->npages; i++) {
		if (WARN_ONCE(addr[i] && !dma_mapping_error(dev, addr[i]),
			      "leaking dma mapping\n"))
			dma_unmap_page(dev, addr[i], PAGE_SIZE, dir);

		page = hmm_pfn_to_page(hmm_pfns[i]);
		if (is_zone_device_page(page)) {
			struct amdgpu_device *bo_adev =
					amdgpu_ttm_adev(prange->svm_bo->bo->tbo.bdev);

			addr[i] = (hmm_pfns[i] << PAGE_SHIFT) +
				   bo_adev->vm_manager.vram_base_offset -
				   bo_adev->kfd.dev->pgmap.range.start;
			addr[i] |= SVM_RANGE_VRAM_DOMAIN;
			pr_debug("vram address detected: 0x%llx\n", addr[i]);
			continue;
		}
		addr[i] = dma_map_page(dev, page, 0, PAGE_SIZE, dir);
		r = dma_mapping_error(dev, addr[i]);
		if (r) {
			pr_debug("failed %d dma_map_page\n", r);
			return r;
		}
		pr_debug("dma mapping 0x%llx for page addr 0x%lx\n",
			 addr[i] >> PAGE_SHIFT, page_to_pfn(page));
	}
	return 0;
}

static int
svm_range_dma_map(struct svm_range *prange, unsigned long *bitmap,
		  unsigned long *hmm_pfns)
{
	struct kfd_process *p;
	uint32_t gpuidx;
	int r = 0;

	p = container_of(prange->svms, struct kfd_process, svms);

	for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) {
		struct kfd_process_device *pdd;
		struct amdgpu_device *adev;

		pr_debug("mapping to gpu idx 0x%x\n", gpuidx);
		pdd = kfd_process_device_from_gpuidx(p, gpuidx);
		if (!pdd) {
			pr_debug("failed to find device idx %d\n", gpuidx);
			return -EINVAL;
		}
		adev = (struct amdgpu_device *)pdd->dev->kgd;

		r = svm_range_dma_map_dev(adev, prange, hmm_pfns, gpuidx);
		if (r)
			break;
	}

	return r;
}

void svm_range_dma_unmap(struct device *dev, dma_addr_t *dma_addr,
			 unsigned long offset, unsigned long npages)
{
	enum dma_data_direction dir = DMA_BIDIRECTIONAL;
	int i;

	if (!dma_addr)
		return;

	for (i = offset; i < offset + npages; i++) {
		if (!dma_addr[i] || dma_mapping_error(dev, dma_addr[i]))
			continue;
		pr_debug("dma unmapping 0x%llx\n", dma_addr[i] >> PAGE_SHIFT);
		dma_unmap_page(dev, dma_addr[i], PAGE_SIZE, dir);
		dma_addr[i] = 0;
	}
}

void svm_range_free_dma_mappings(struct svm_range *prange)
{
	struct kfd_process_device *pdd;
	dma_addr_t *dma_addr;
	struct device *dev;
	struct kfd_process *p;
	uint32_t gpuidx;

	p = container_of(prange->svms, struct kfd_process, svms);

	for (gpuidx = 0; gpuidx < MAX_GPU_INSTANCE; gpuidx++) {
		dma_addr = prange->dma_addr[gpuidx];
		if (!dma_addr)
			continue;

		pdd = kfd_process_device_from_gpuidx(p, gpuidx);
		if (!pdd) {
			pr_debug("failed to find device idx %d\n", gpuidx);
			continue;
		}
		dev = &pdd->dev->pdev->dev;
		svm_range_dma_unmap(dev, dma_addr, 0, prange->npages);
		kvfree(dma_addr);
		prange->dma_addr[gpuidx] = NULL;
	}
}

static void svm_range_free(struct svm_range *prange)
{
	pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms, prange,
		 prange->start, prange->last);

	svm_range_vram_node_free(prange);
	svm_range_free_dma_mappings(prange);
	mutex_destroy(&prange->lock);
	mutex_destroy(&prange->migrate_mutex);
	kfree(prange);
}

static void
svm_range_set_default_attributes(int32_t *location, int32_t *prefetch_loc,
				 uint8_t *granularity, uint32_t *flags)
{
	*location = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
	*prefetch_loc = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
	*granularity = 9;
	*flags =
		KFD_IOCTL_SVM_FLAG_HOST_ACCESS | KFD_IOCTL_SVM_FLAG_COHERENT;
}

static struct
svm_range *svm_range_new(struct svm_range_list *svms, uint64_t start,
			 uint64_t last)
{
	uint64_t size = last - start + 1;
	struct svm_range *prange;
	struct kfd_process *p;

	prange = kzalloc(sizeof(*prange), GFP_KERNEL);
	if (!prange)
		return NULL;
	prange->npages = size;
	prange->svms = svms;
	prange->start = start;
	prange->last = last;
	INIT_LIST_HEAD(&prange->list);
	INIT_LIST_HEAD(&prange->update_list);
	INIT_LIST_HEAD(&prange->remove_list);
	INIT_LIST_HEAD(&prange->insert_list);
	INIT_LIST_HEAD(&prange->svm_bo_list);
	INIT_LIST_HEAD(&prange->deferred_list);
	INIT_LIST_HEAD(&prange->child_list);
	atomic_set(&prange->invalid, 0);
	prange->validate_timestamp = 0;
	mutex_init(&prange->migrate_mutex);
	mutex_init(&prange->lock);

	p = container_of(svms, struct kfd_process, svms);
	if (p->xnack_enabled)
		bitmap_copy(prange->bitmap_access, svms->bitmap_supported,
			    MAX_GPU_INSTANCE);

	svm_range_set_default_attributes(&prange->preferred_loc,
					 &prange->prefetch_loc,
					 &prange->granularity, &prange->flags);

	pr_debug("svms 0x%p [0x%llx 0x%llx]\n", svms, start, last);

	return prange;
}

static bool svm_bo_ref_unless_zero(struct svm_range_bo *svm_bo)
{
	if (!svm_bo || !kref_get_unless_zero(&svm_bo->kref))
		return false;

	return true;
}
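
/* svm_range_bo_release - final kref release of a shared SVM VRAM BO
 *
 * Detaches every prange still on the BO's range_list, signals the eviction
 * fence if it has not been signaled yet (synchronizing with any pending
 * eviction work), then drops the fence and BO references.
 */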
static void svm_range_bo_release(struct kref *kref)
{
	struct svm_range_bo *svm_bo;

	svm_bo = container_of(kref, struct svm_range_bo, kref);
	spin_lock(&svm_bo->list_lock);
	while (!list_empty(&svm_bo->range_list)) {
		struct svm_range *prange =
				list_first_entry(&svm_bo->range_list,
						struct svm_range, svm_bo_list);
		/* list_del_init tells a concurrent svm_range_vram_node_new when
		 * it's safe to reuse the svm_bo pointer and svm_bo_list head.
		 */
		list_del_init(&prange->svm_bo_list);
		spin_unlock(&svm_bo->list_lock);

		pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms,
			 prange->start, prange->last);
		mutex_lock(&prange->lock);
		prange->svm_bo = NULL;
		mutex_unlock(&prange->lock);

		spin_lock(&svm_bo->list_lock);
	}
	spin_unlock(&svm_bo->list_lock);
	if (!dma_fence_is_signaled(&svm_bo->eviction_fence->base)) {
		/* We're not in the eviction worker.
		 * Signal the fence and synchronize with any
		 * pending eviction work.
		 */
		dma_fence_signal(&svm_bo->eviction_fence->base);
		cancel_work_sync(&svm_bo->eviction_work);
	}
	dma_fence_put(&svm_bo->eviction_fence->base);
	amdgpu_bo_unref(&svm_bo->bo);
	kfree(svm_bo);
}

void svm_range_bo_unref(struct svm_range_bo *svm_bo)
{
	if (!svm_bo)
		return;

	kref_put(&svm_bo->kref, svm_range_bo_release);
}

static bool
svm_range_validate_svm_bo(struct amdgpu_device *adev, struct svm_range *prange)
{
	struct amdgpu_device *bo_adev;

	mutex_lock(&prange->lock);
	if (!prange->svm_bo) {
		mutex_unlock(&prange->lock);
		return false;
	}
	if (prange->ttm_res) {
		/* We still have a reference, all is well */
		mutex_unlock(&prange->lock);
		return true;
	}
	if (svm_bo_ref_unless_zero(prange->svm_bo)) {
		/*
		 * Migrate from GPU to GPU, remove range from source bo_adev
		 * svm_bo range list, and return false to allocate svm_bo from
		 * destination adev.
		 */
		bo_adev = amdgpu_ttm_adev(prange->svm_bo->bo->tbo.bdev);
		if (bo_adev != adev) {
			mutex_unlock(&prange->lock);

			spin_lock(&prange->svm_bo->list_lock);
			list_del_init(&prange->svm_bo_list);
			spin_unlock(&prange->svm_bo->list_lock);

			svm_range_bo_unref(prange->svm_bo);
			return false;
		}
		if (READ_ONCE(prange->svm_bo->evicting)) {
			struct dma_fence *f;
			struct svm_range_bo *svm_bo;
			/* The BO is getting evicted,
			 * we need to get a new one
			 */
			mutex_unlock(&prange->lock);
			svm_bo = prange->svm_bo;
			f = dma_fence_get(&svm_bo->eviction_fence->base);
			svm_range_bo_unref(prange->svm_bo);
			/* wait for the fence to avoid long spin-loop
			 * at list_empty_careful
			 */
			dma_fence_wait(f, false);
			dma_fence_put(f);
		} else {
			/* The BO was still around and we got
			 * a new reference to it
			 */
			mutex_unlock(&prange->lock);
			pr_debug("reuse old bo svms 0x%p [0x%lx 0x%lx]\n",
				 prange->svms, prange->start, prange->last);

			prange->ttm_res = prange->svm_bo->bo->tbo.resource;
			return true;
		}
	} else {
		mutex_unlock(&prange->lock);
	}

	/* We need a new svm_bo. Spin-loop to wait for concurrent
	 * svm_range_bo_release to finish removing this range from
	 * its range list. After this, it is safe to reuse the
	 * svm_bo pointer and svm_bo_list head.
	 */
	while (!list_empty_careful(&prange->svm_bo_list))
		;

	return false;
}

static struct svm_range_bo *svm_range_bo_new(void)
{
	struct svm_range_bo *svm_bo;

	svm_bo = kzalloc(sizeof(*svm_bo), GFP_KERNEL);
	if (!svm_bo)
		return NULL;

	kref_init(&svm_bo->kref);
	INIT_LIST_HEAD(&svm_bo->range_list);
	spin_lock_init(&svm_bo->list_lock);

	return svm_bo;
}
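
/* svm_range_vram_node_new - ensure the range has a backing VRAM BO
 *
 * Reuses the existing svm_bo when svm_range_validate_svm_bo() reports it is
 * still usable on this GPU; otherwise allocates a new VRAM BO, attaches the
 * eviction fence, and links the range to the new BO's range list.
 */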
int
svm_range_vram_node_new(struct amdgpu_device *adev, struct svm_range *prange,
			bool clear)
{
	struct amdgpu_bo_param bp;
	struct svm_range_bo *svm_bo;
	struct amdgpu_bo_user *ubo;
	struct amdgpu_bo *bo;
	struct kfd_process *p;
	struct mm_struct *mm;
	int r;

	p = container_of(prange->svms, struct kfd_process, svms);
	pr_debug("pasid: %x svms 0x%p [0x%lx 0x%lx]\n", p->pasid, prange->svms,
		 prange->start, prange->last);

	if (svm_range_validate_svm_bo(adev, prange))
		return 0;

	svm_bo = svm_range_bo_new();
	if (!svm_bo) {
		pr_debug("failed to alloc svm bo\n");
		return -ENOMEM;
	}
	mm = get_task_mm(p->lead_thread);
	if (!mm) {
		pr_debug("failed to get mm\n");
		kfree(svm_bo);
		return -ESRCH;
	}
	svm_bo->svms = prange->svms;
	svm_bo->eviction_fence =
		amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1),
					   mm,
					   svm_bo);
	mmput(mm);
	INIT_WORK(&svm_bo->eviction_work, svm_range_evict_svm_bo_worker);
	svm_bo->evicting = 0;
	memset(&bp, 0, sizeof(bp));
	bp.size = prange->npages * PAGE_SIZE;
	bp.byte_align = PAGE_SIZE;
	bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
	bp.flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
	bp.flags |= clear ? AMDGPU_GEM_CREATE_VRAM_CLEARED : 0;
	bp.flags |= AMDGPU_AMDKFD_CREATE_SVM_BO;
	bp.type = ttm_bo_type_device;
	bp.resv = NULL;

	r = amdgpu_bo_create_user(adev, &bp, &ubo);
	if (r) {
		pr_debug("failed %d to create bo\n", r);
		goto create_bo_failed;
	}
	bo = &ubo->bo;
	r = amdgpu_bo_reserve(bo, true);
	if (r) {
		pr_debug("failed %d to reserve bo\n", r);
		goto reserve_bo_failed;
	}

	r = dma_resv_reserve_shared(bo->tbo.base.resv, 1);
	if (r) {
		pr_debug("failed %d to reserve bo\n", r);
		amdgpu_bo_unreserve(bo);
		goto reserve_bo_failed;
	}
	amdgpu_bo_fence(bo, &svm_bo->eviction_fence->base, true);

	amdgpu_bo_unreserve(bo);

	svm_bo->bo = bo;
	prange->svm_bo = svm_bo;
	prange->ttm_res = bo->tbo.resource;
	prange->offset = 0;

	spin_lock(&svm_bo->list_lock);
	list_add(&prange->svm_bo_list, &svm_bo->range_list);
	spin_unlock(&svm_bo->list_lock);

	return 0;

reserve_bo_failed:
	amdgpu_bo_unref(&bo);
create_bo_failed:
	dma_fence_put(&svm_bo->eviction_fence->base);
	kfree(svm_bo);
	prange->ttm_res = NULL;

	return -ENOMEM;
}

void svm_range_vram_node_free(struct svm_range *prange)
{
	svm_range_bo_unref(prange->svm_bo);
	prange->ttm_res = NULL;
}

struct amdgpu_device *
svm_range_get_adev_by_id(struct svm_range *prange, uint32_t gpu_id)
{
	struct kfd_process_device *pdd;
	struct kfd_process *p;
	int32_t gpu_idx;

	p = container_of(prange->svms, struct kfd_process, svms);

	gpu_idx = kfd_process_gpuidx_from_gpuid(p, gpu_id);
	if (gpu_idx < 0) {
		pr_debug("failed to get device by id 0x%x\n", gpu_id);
		return NULL;
	}
	pdd = kfd_process_device_from_gpuidx(p, gpu_idx);
	if (!pdd) {
		pr_debug("failed to get device by idx 0x%x\n", gpu_idx);
		return NULL;
	}

	return (struct amdgpu_device *)pdd->dev->kgd;
}

struct kfd_process_device *
svm_range_get_pdd_by_adev(struct svm_range *prange, struct amdgpu_device *adev)
{
	struct kfd_process *p;
	int32_t gpu_idx, gpuid;
	int r;

	p = container_of(prange->svms, struct kfd_process, svms);

	r = kfd_process_gpuid_from_kgd(p, adev, &gpuid, &gpu_idx);
	if (r) {
		pr_debug("failed to get device id by adev %p\n", adev);
		return NULL;
	}

	return kfd_process_device_from_gpuidx(p, gpu_idx);
}
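
/* svm_range_bo_validate() is the per-BO callback passed to
 * amdgpu_vm_validate_pt_bos() in svm_range_reserve_bos() below: it validates
 * an evicted BO back into the VRAM domain.
 */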
static int svm_range_bo_validate(void *param, struct amdgpu_bo *bo)
{
	struct ttm_operation_ctx ctx = { false, false };

	amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);

	return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
}

static int
svm_range_check_attr(struct kfd_process *p,
		     uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs)
{
	uint32_t i;

	for (i = 0; i < nattr; i++) {
		uint32_t val = attrs[i].value;
		int gpuidx = MAX_GPU_INSTANCE;

		switch (attrs[i].type) {
		case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
			if (val != KFD_IOCTL_SVM_LOCATION_SYSMEM &&
			    val != KFD_IOCTL_SVM_LOCATION_UNDEFINED)
				gpuidx = kfd_process_gpuidx_from_gpuid(p, val);
			break;
		case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
			if (val != KFD_IOCTL_SVM_LOCATION_SYSMEM)
				gpuidx = kfd_process_gpuidx_from_gpuid(p, val);
			break;
		case KFD_IOCTL_SVM_ATTR_ACCESS:
		case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE:
		case KFD_IOCTL_SVM_ATTR_NO_ACCESS:
			gpuidx = kfd_process_gpuidx_from_gpuid(p, val);
			break;
		case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
			break;
		case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
			break;
		case KFD_IOCTL_SVM_ATTR_GRANULARITY:
			break;
		default:
			pr_debug("unknown attr type 0x%x\n", attrs[i].type);
			return -EINVAL;
		}

		if (gpuidx < 0) {
			pr_debug("no GPU 0x%x found\n", val);
			return -EINVAL;
		} else if (gpuidx < MAX_GPU_INSTANCE &&
			   !test_bit(gpuidx, p->svms.bitmap_supported)) {
			pr_debug("GPU 0x%x not supported\n", val);
			return -EINVAL;
		}
	}

	return 0;
}

static void
svm_range_apply_attrs(struct kfd_process *p, struct svm_range *prange,
		      uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs)
{
	uint32_t i;
	int gpuidx;

	for (i = 0; i < nattr; i++) {
		switch (attrs[i].type) {
		case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
			prange->preferred_loc = attrs[i].value;
			break;
		case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
			prange->prefetch_loc = attrs[i].value;
			break;
		case KFD_IOCTL_SVM_ATTR_ACCESS:
		case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE:
		case KFD_IOCTL_SVM_ATTR_NO_ACCESS:
			gpuidx = kfd_process_gpuidx_from_gpuid(p,
							       attrs[i].value);
			if (attrs[i].type == KFD_IOCTL_SVM_ATTR_NO_ACCESS) {
				bitmap_clear(prange->bitmap_access, gpuidx, 1);
				bitmap_clear(prange->bitmap_aip, gpuidx, 1);
			} else if (attrs[i].type == KFD_IOCTL_SVM_ATTR_ACCESS) {
				bitmap_set(prange->bitmap_access, gpuidx, 1);
				bitmap_clear(prange->bitmap_aip, gpuidx, 1);
			} else {
				bitmap_clear(prange->bitmap_access, gpuidx, 1);
				bitmap_set(prange->bitmap_aip, gpuidx, 1);
			}
			break;
		case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
			prange->flags |= attrs[i].value;
			break;
		case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
			prange->flags &= ~attrs[i].value;
			break;
		case KFD_IOCTL_SVM_ATTR_GRANULARITY:
			prange->granularity = attrs[i].value;
			break;
		default:
			WARN_ONCE(1, "svm_range_check_attrs wasn't called?");
		}
	}
}

/**
 * svm_range_debug_dump - print all range information from svms
 * @svms: svm range list header
 *
 * debug output svm range start, end, prefetch location from svms
 * interval tree and link list
 *
 * Context: The caller must hold svms->lock
 */
static void svm_range_debug_dump(struct svm_range_list *svms)
{
	struct interval_tree_node *node;
	struct svm_range *prange;

	pr_debug("dump svms 0x%p list\n", svms);
	pr_debug("range\tstart\tpage\tend\t\tlocation\n");

	list_for_each_entry(prange, &svms->list, list) {
		pr_debug("0x%p 0x%lx\t0x%llx\t0x%llx\t0x%x\n",
			 prange, prange->start, prange->npages,
			 prange->start + prange->npages - 1,
			 prange->actual_loc);
	}

	pr_debug("dump svms 0x%p interval tree\n", svms);
	pr_debug("range\tstart\tpage\tend\t\tlocation\n");
	node = interval_tree_iter_first(&svms->objects, 0, ~0ULL);
	while (node) {
		prange = container_of(node, struct svm_range, it_node);
		pr_debug("0x%p 0x%lx\t0x%llx\t0x%llx\t0x%x\n",
			 prange, prange->start, prange->npages,
			 prange->start + prange->npages - 1,
			 prange->actual_loc);
		node = interval_tree_iter_next(node, 0, ~0ULL);
	}
}

static bool
svm_range_is_same_attrs(struct svm_range *old, struct svm_range *new)
{
	return (old->prefetch_loc == new->prefetch_loc &&
		old->flags == new->flags &&
		old->granularity == new->granularity);
}

static int
svm_range_split_array(void *ppnew, void *ppold, size_t size,
		      uint64_t old_start, uint64_t old_n,
		      uint64_t new_start, uint64_t new_n)
{
	unsigned char *new, *old, *pold;
	uint64_t d;

	if (!ppold)
		return -EINVAL;
	pold = *(unsigned char **)ppold;
	if (!pold)
		return 0;

	new = kvmalloc_array(new_n, size, GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	d = (new_start - old_start) * size;
	memcpy(new, pold + d, new_n * size);

	old = kvmalloc_array(old_n, size, GFP_KERNEL);
	if (!old) {
		kvfree(new);
		return -ENOMEM;
	}

	d = (new_start == old_start) ? new_n * size : 0;
	memcpy(old, pold + d, old_n * size);

	kvfree(pold);
	*(void **)ppold = old;
	*(void **)ppnew = new;

	return 0;
}
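
/* Example for svm_range_split_array() above (illustrative): a 256-page range
 * starting at page 0x100 has a 256-entry per-GPU dma_addr array. Splitting
 * off a new range [0x180 0x1ff] copies entries 0x80..0xff into the new
 * 128-entry array and shrinks the old array to entries 0x00..0x7f.
 */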
static int
svm_range_split_pages(struct svm_range *new, struct svm_range *old,
		      uint64_t start, uint64_t last)
{
	uint64_t npages = last - start + 1;
	int i, r;

	for (i = 0; i < MAX_GPU_INSTANCE; i++) {
		r = svm_range_split_array(&new->dma_addr[i], &old->dma_addr[i],
					  sizeof(*old->dma_addr[i]), old->start,
					  npages, new->start, new->npages);
		if (r)
			return r;
	}

	return 0;
}

static int
svm_range_split_nodes(struct svm_range *new, struct svm_range *old,
		      uint64_t start, uint64_t last)
{
	uint64_t npages = last - start + 1;

	pr_debug("svms 0x%p new prange 0x%p start 0x%lx [0x%llx 0x%llx]\n",
		 new->svms, new, new->start, start, last);

	if (new->start == old->start) {
		new->offset = old->offset;
		old->offset += new->npages;
	} else {
		new->offset = old->offset + npages;
	}

	new->svm_bo = svm_range_bo_ref(old->svm_bo);
	new->ttm_res = old->ttm_res;

	spin_lock(&new->svm_bo->list_lock);
	list_add(&new->svm_bo_list, &new->svm_bo->range_list);
	spin_unlock(&new->svm_bo->list_lock);

	return 0;
}

/**
 * svm_range_split_adjust - split range and adjust
 *
 * @new: new range
 * @old: the old range
 * @start: the old range adjust to start address in pages
 * @last: the old range adjust to last address in pages
 *
 * Copy system memory dma_addr or vram ttm_res in old range to new
 * range from new_start up to size new->npages, the remaining old range is from
 * start to last
 *
 * Return:
 * 0 - OK, -ENOMEM - out of memory
 */
static int
svm_range_split_adjust(struct svm_range *new, struct svm_range *old,
		       uint64_t start, uint64_t last)
{
	int r;

	pr_debug("svms 0x%p new 0x%lx old [0x%lx 0x%lx] => [0x%llx 0x%llx]\n",
		 new->svms, new->start, old->start, old->last, start, last);

	if (new->start < old->start ||
	    new->last > old->last) {
		WARN_ONCE(1, "invalid new range start or last\n");
		return -EINVAL;
	}

	r = svm_range_split_pages(new, old, start, last);
	if (r)
		return r;

	if (old->actual_loc && old->ttm_res) {
		r = svm_range_split_nodes(new, old, start, last);
		if (r)
			return r;
	}

	old->npages = last - start + 1;
	old->start = start;
	old->last = last;
	new->flags = old->flags;
	new->preferred_loc = old->preferred_loc;
	new->prefetch_loc = old->prefetch_loc;
	new->actual_loc = old->actual_loc;
	new->granularity = old->granularity;
	bitmap_copy(new->bitmap_access, old->bitmap_access, MAX_GPU_INSTANCE);
	bitmap_copy(new->bitmap_aip, old->bitmap_aip, MAX_GPU_INSTANCE);

	return 0;
}

/**
 * svm_range_split - split a range in 2 ranges
 *
 * @prange: the svm range to split
 * @start: the remaining range start address in pages
 * @last: the remaining range last address in pages
 * @new: the result new range generated
 *
 * Two cases are handled:
 * case 1: if start == prange->start
 *         prange ==> prange[start, last]
 *         new range [last + 1, prange->last]
 *
 * case 2: if last == prange->last
 *         prange ==> prange[start, last]
 *         new range [prange->start, start - 1]
 *
 * Return:
 * 0 - OK, -ENOMEM - out of memory, -EINVAL - invalid start, last
 */
static int
svm_range_split(struct svm_range *prange, uint64_t start, uint64_t last,
		struct svm_range **new)
{
	uint64_t old_start = prange->start;
	uint64_t old_last = prange->last;
	struct svm_range_list *svms;
	int r = 0;

	pr_debug("svms 0x%p [0x%llx 0x%llx] to [0x%llx 0x%llx]\n", prange->svms,
		 old_start, old_last, start, last);

	if (old_start != start && old_last != last)
		return -EINVAL;
	if (start < old_start || last > old_last)
		return -EINVAL;

	svms = prange->svms;
	if (old_start == start)
		*new = svm_range_new(svms, last + 1, old_last);
	else
		*new = svm_range_new(svms, old_start, start - 1);
	if (!*new)
		return -ENOMEM;

	r = svm_range_split_adjust(*new, prange, start, last);
	if (r) {
		pr_debug("failed %d split [0x%llx 0x%llx] to [0x%llx 0x%llx]\n",
			 r, old_start, old_last, start, last);
		svm_range_free(*new);
		*new = NULL;
	}

	return r;
}

static int
svm_range_split_tail(struct svm_range *prange, struct svm_range *new,
		     uint64_t new_last, struct list_head *insert_list)
{
	struct svm_range *tail;
	int r = svm_range_split(prange, prange->start, new_last, &tail);

	if (!r)
		list_add(&tail->insert_list, insert_list);
	return r;
}

static int
svm_range_split_head(struct svm_range *prange, struct svm_range *new,
		     uint64_t new_start, struct list_head *insert_list)
{
	struct svm_range *head;
	int r = svm_range_split(prange, new_start, prange->last, &head);

	if (!r)
		list_add(&head->insert_list, insert_list);
	return r;
}

static void
svm_range_add_child(struct svm_range *prange, struct mm_struct *mm,
		    struct svm_range *pchild, enum svm_work_list_ops op)
{
	pr_debug("add child 0x%p [0x%lx 0x%lx] to prange 0x%p child list %d\n",
		 pchild, pchild->start, pchild->last, prange, op);

	pchild->work_item.mm = mm;
	pchild->work_item.op = op;
	list_add_tail(&pchild->child_list, &prange->child_list);
}

/**
 * svm_range_split_by_granularity - collect ranges within granularity boundary
 *
 * @p: the process with svms list
 * @mm: mm structure
 * @addr: the vm fault address in pages, to split the prange
 * @parent: parent range if prange is from child list
 * @prange: prange to split
 *
 * Trims @prange to be a single aligned block of prange->granularity if
 * possible. The head and tail are added to the child_list in @parent.
 *
 * Context: caller must hold mmap_read_lock and prange->lock
 *
 * Return:
 * 0 - OK, otherwise error code
 */
int
svm_range_split_by_granularity(struct kfd_process *p, struct mm_struct *mm,
			       unsigned long addr, struct svm_range *parent,
			       struct svm_range *prange)
{
	struct svm_range *head, *tail;
	unsigned long start, last, size;
	int r;

	/* Align the split range start and size to the granularity size, then a
	 * single PTE will be used for the whole range. This reduces the number
	 * of PTEs updated and the L1 TLB space used for translation.
	 */
	size = 1UL << prange->granularity;
	start = ALIGN_DOWN(addr, size);
	last = ALIGN(addr + 1, size) - 1;

	pr_debug("svms 0x%p split [0x%lx 0x%lx] to [0x%lx 0x%lx] size 0x%lx\n",
		 prange->svms, prange->start, prange->last, start, last, size);

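	/* Example (illustrative): with the default granularity of 9, size is
	 * 1UL << 9 = 512 pages (2MB with 4KB pages), so a fault at page
	 * 0x1234 is trimmed to the aligned block [0x1200 0x13ff].
	 */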
	if (start > prange->start) {
		r = svm_range_split(prange, start, prange->last, &head);
		if (r)
			return r;
		svm_range_add_child(parent, mm, head, SVM_OP_ADD_RANGE);
	}

	if (last < prange->last) {
		r = svm_range_split(prange, prange->start, last, &tail);
		if (r)
			return r;
		svm_range_add_child(parent, mm, tail, SVM_OP_ADD_RANGE);
	}

	/* xnack on, update mapping on GPUs with ACCESS_IN_PLACE */
	if (p->xnack_enabled && prange->work_item.op == SVM_OP_ADD_RANGE) {
		prange->work_item.op = SVM_OP_ADD_RANGE_AND_MAP;
		pr_debug("change prange 0x%p [0x%lx 0x%lx] op %d\n",
			 prange, prange->start, prange->last,
			 SVM_OP_ADD_RANGE_AND_MAP);
	}
	return 0;
}

static uint64_t
svm_range_get_pte_flags(struct amdgpu_device *adev, struct svm_range *prange,
			int domain)
{
	struct amdgpu_device *bo_adev;
	uint32_t flags = prange->flags;
	uint32_t mapping_flags = 0;
	uint64_t pte_flags;
	bool snoop = (domain != SVM_RANGE_VRAM_DOMAIN);
	bool coherent = flags & KFD_IOCTL_SVM_FLAG_COHERENT;

	if (domain == SVM_RANGE_VRAM_DOMAIN)
		bo_adev = amdgpu_ttm_adev(prange->svm_bo->bo->tbo.bdev);

	switch (adev->asic_type) {
	case CHIP_ARCTURUS:
		if (domain == SVM_RANGE_VRAM_DOMAIN) {
			if (bo_adev == adev) {
				mapping_flags |= coherent ?
					AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
			} else {
				mapping_flags |= coherent ?
					AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
				if (amdgpu_xgmi_same_hive(adev, bo_adev))
					snoop = true;
			}
		} else {
			mapping_flags |= coherent ?
				AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
		}
		break;
	case CHIP_ALDEBARAN:
		if (domain == SVM_RANGE_VRAM_DOMAIN) {
			if (bo_adev == adev) {
				mapping_flags |= coherent ?
					AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
				if (adev->gmc.xgmi.connected_to_cpu)
					snoop = true;
			} else {
				mapping_flags |= coherent ?
					AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
				if (amdgpu_xgmi_same_hive(adev, bo_adev))
					snoop = true;
			}
		} else {
			mapping_flags |= coherent ?
				AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
		}
		break;
	default:
		mapping_flags |= coherent ?
			AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
	}

	mapping_flags |= AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE;

	if (flags & KFD_IOCTL_SVM_FLAG_GPU_RO)
		mapping_flags &= ~AMDGPU_VM_PAGE_WRITEABLE;
	if (flags & KFD_IOCTL_SVM_FLAG_GPU_EXEC)
		mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;

	pte_flags = AMDGPU_PTE_VALID;
	pte_flags |= (domain == SVM_RANGE_VRAM_DOMAIN) ? 0 : AMDGPU_PTE_SYSTEM;
	pte_flags |= snoop ? AMDGPU_PTE_SNOOPED : 0;

	pte_flags |= amdgpu_gem_va_map_flags(adev, mapping_flags);

	pr_debug("svms 0x%p [0x%lx 0x%lx] vram %d PTE 0x%llx mapping 0x%x\n",
		 prange->svms, prange->start, prange->last,
		 (domain == SVM_RANGE_VRAM_DOMAIN) ? 1 : 0, pte_flags, mapping_flags);

	return pte_flags;
}
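
/* Summary of the MTYPE selection above: VRAM local to the mapping GPU uses
 * CC (coherent) or RW; VRAM on a peer GPU uses UC/NC, with snooping enabled
 * when the two GPUs share an XGMI hive; system memory uses UC/NC plus
 * AMDGPU_PTE_SYSTEM and AMDGPU_PTE_SNOOPED.
 */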
static int
svm_range_unmap_from_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			 uint64_t start, uint64_t last,
			 struct dma_fence **fence)
{
	uint64_t init_pte_value = 0;

	pr_debug("[0x%llx 0x%llx]\n", start, last);

	return amdgpu_vm_bo_update_mapping(adev, adev, vm, false, true, NULL,
					   start, last, init_pte_value, 0,
					   NULL, NULL, fence, NULL);
}

static int
svm_range_unmap_from_gpus(struct svm_range *prange, unsigned long start,
			  unsigned long last)
{
	DECLARE_BITMAP(bitmap, MAX_GPU_INSTANCE);
	struct kfd_process_device *pdd;
	struct dma_fence *fence = NULL;
	struct amdgpu_device *adev;
	struct kfd_process *p;
	uint32_t gpuidx;
	int r = 0;

	bitmap_or(bitmap, prange->bitmap_access, prange->bitmap_aip,
		  MAX_GPU_INSTANCE);
	p = container_of(prange->svms, struct kfd_process, svms);

	for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) {
		pr_debug("unmap from gpu idx 0x%x\n", gpuidx);
		pdd = kfd_process_device_from_gpuidx(p, gpuidx);
		if (!pdd) {
			pr_debug("failed to find device idx %d\n", gpuidx);
			return -EINVAL;
		}
		adev = (struct amdgpu_device *)pdd->dev->kgd;

		r = svm_range_unmap_from_gpu(adev, drm_priv_to_vm(pdd->drm_priv),
					     start, last, &fence);
		if (r)
			break;

		if (fence) {
			r = dma_fence_wait(fence, false);
			dma_fence_put(fence);
			fence = NULL;
			if (r)
				break;
		}
		amdgpu_amdkfd_flush_gpu_tlb_pasid((struct kgd_dev *)adev,
						  p->pasid, TLB_FLUSH_HEAVYWEIGHT);
	}

	return r;
}

static int
svm_range_map_to_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm,
		     struct svm_range *prange, dma_addr_t *dma_addr,
		     struct amdgpu_device *bo_adev, struct dma_fence **fence)
{
	struct amdgpu_bo_va bo_va;
	bool table_freed = false;
	uint64_t pte_flags;
	unsigned long last_start;
	int last_domain;
	int r = 0;
	int64_t i;

	pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms, prange->start,
		 prange->last);

	if (prange->svm_bo && prange->ttm_res)
		bo_va.is_xgmi = amdgpu_xgmi_same_hive(adev, bo_adev);

	last_start = prange->start;
	for (i = 0; i < prange->npages; i++) {
		last_domain = dma_addr[i] & SVM_RANGE_VRAM_DOMAIN;
		dma_addr[i] &= ~SVM_RANGE_VRAM_DOMAIN;
		if ((prange->start + i) < prange->last &&
		    last_domain == (dma_addr[i + 1] & SVM_RANGE_VRAM_DOMAIN))
			continue;

		pr_debug("Mapping range [0x%lx 0x%llx] on domain: %s\n",
			 last_start, prange->start + i, last_domain ? "GPU" : "CPU");
		pte_flags = svm_range_get_pte_flags(adev, prange, last_domain);
		r = amdgpu_vm_bo_update_mapping(adev, bo_adev, vm, false, false, NULL,
						last_start,
						prange->start + i, pte_flags,
						last_start - prange->start,
						NULL,
						dma_addr,
						&vm->last_update,
						&table_freed);
		if (r) {
			pr_debug("failed %d to map to gpu 0x%lx\n", r, prange->start);
			goto out;
		}
		last_start = prange->start + i + 1;
	}

	r = amdgpu_vm_update_pdes(adev, vm, false);
	if (r) {
		pr_debug("failed %d to update directories 0x%lx\n", r,
			 prange->start);
		goto out;
	}

	if (fence)
		*fence = dma_fence_get(vm->last_update);

	if (table_freed) {
		struct kfd_process *p;

		p = container_of(prange->svms, struct kfd_process, svms);
		amdgpu_amdkfd_flush_gpu_tlb_pasid((struct kgd_dev *)adev,
						  p->pasid, TLB_FLUSH_LEGACY);
	}
out:
	return r;
}

static int svm_range_map_to_gpus(struct svm_range *prange,
				 unsigned long *bitmap, bool wait)
{
	struct kfd_process_device *pdd;
	struct amdgpu_device *bo_adev;
	struct amdgpu_device *adev;
	struct kfd_process *p;
	struct dma_fence *fence = NULL;
	uint32_t gpuidx;
	int r = 0;

	if (prange->svm_bo && prange->ttm_res)
		bo_adev = amdgpu_ttm_adev(prange->svm_bo->bo->tbo.bdev);
	else
		bo_adev = NULL;

	p = container_of(prange->svms, struct kfd_process, svms);
	for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) {
		pr_debug("mapping to gpu idx 0x%x\n", gpuidx);
		pdd = kfd_process_device_from_gpuidx(p, gpuidx);
		if (!pdd) {
			pr_debug("failed to find device idx %d\n", gpuidx);
			return -EINVAL;
		}
		adev = (struct amdgpu_device *)pdd->dev->kgd;

		pdd = kfd_bind_process_to_device(pdd->dev, p);
		if (IS_ERR(pdd))
			return -EINVAL;

		if (bo_adev && adev != bo_adev &&
		    !amdgpu_xgmi_same_hive(adev, bo_adev)) {
			pr_debug("cannot map to device idx %d\n", gpuidx);
			continue;
		}

		r = svm_range_map_to_gpu(adev, drm_priv_to_vm(pdd->drm_priv),
					 prange, prange->dma_addr[gpuidx],
					 bo_adev, wait ? &fence : NULL);
		if (r)
			break;

		if (fence) {
			r = dma_fence_wait(fence, false);
			dma_fence_put(fence);
			fence = NULL;
			if (r) {
				pr_debug("failed %d to dma fence wait\n", r);
				break;
			}
		}
	}

	return r;
}

struct svm_validate_context {
	struct kfd_process *process;
	struct svm_range *prange;
	bool intr;
	unsigned long bitmap[MAX_GPU_INSTANCE];
	struct ttm_validate_buffer tv[MAX_GPU_INSTANCE+1];
	struct list_head validate_list;
	struct ww_acquire_ctx ticket;
};

static int svm_range_reserve_bos(struct svm_validate_context *ctx)
{
	struct kfd_process_device *pdd;
	struct amdgpu_device *adev;
	struct amdgpu_vm *vm;
	uint32_t gpuidx;
	int r;

	INIT_LIST_HEAD(&ctx->validate_list);
	for_each_set_bit(gpuidx, ctx->bitmap, MAX_GPU_INSTANCE) {
		pdd = kfd_process_device_from_gpuidx(ctx->process, gpuidx);
		if (!pdd) {
			pr_debug("failed to find device idx %d\n", gpuidx);
			return -EINVAL;
		}
		adev = (struct amdgpu_device *)pdd->dev->kgd;
		vm = drm_priv_to_vm(pdd->drm_priv);

		ctx->tv[gpuidx].bo = &vm->root.bo->tbo;
		ctx->tv[gpuidx].num_shared = 4;
		list_add(&ctx->tv[gpuidx].head, &ctx->validate_list);
	}
	if (ctx->prange->svm_bo && ctx->prange->ttm_res) {
		ctx->tv[MAX_GPU_INSTANCE].bo = &ctx->prange->svm_bo->bo->tbo;
		ctx->tv[MAX_GPU_INSTANCE].num_shared = 1;
		list_add(&ctx->tv[MAX_GPU_INSTANCE].head, &ctx->validate_list);
	}

	r = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->validate_list,
				   ctx->intr, NULL);
	if (r) {
		pr_debug("failed %d to reserve bo\n", r);
		return r;
	}

	for_each_set_bit(gpuidx, ctx->bitmap, MAX_GPU_INSTANCE) {
		pdd = kfd_process_device_from_gpuidx(ctx->process, gpuidx);
		if (!pdd) {
			pr_debug("failed to find device idx %d\n", gpuidx);
			r = -EINVAL;
			goto unreserve_out;
		}
		adev = (struct amdgpu_device *)pdd->dev->kgd;

		r = amdgpu_vm_validate_pt_bos(adev, drm_priv_to_vm(pdd->drm_priv),
					      svm_range_bo_validate, NULL);
		if (r) {
			pr_debug("failed %d validate pt bos\n", r);
			goto unreserve_out;
		}
	}

	return 0;

unreserve_out:
	ttm_eu_backoff_reservation(&ctx->ticket, &ctx->validate_list);
	return r;
}

static void svm_range_unreserve_bos(struct svm_validate_context *ctx)
{
	ttm_eu_backoff_reservation(&ctx->ticket, &ctx->validate_list);
}

static void *kfd_svm_page_owner(struct kfd_process *p, int32_t gpuidx)
{
	struct kfd_process_device *pdd;
	struct amdgpu_device *adev;

	pdd = kfd_process_device_from_gpuidx(p, gpuidx);
	adev = (struct amdgpu_device *)pdd->dev->kgd;

	return SVM_ADEV_PGMAP_OWNER(adev);
}

/*
 * Validation+GPU mapping with concurrent invalidation (MMU notifiers)
 *
 * To prevent concurrent destruction or change of range attributes, the
 * svm_read_lock must be held. The caller must not hold the svm_write_lock
 * because that would block concurrent evictions and lead to deadlocks. To
 * serialize concurrent migrations or validations of the same range, the
 * prange->migrate_mutex must be held.
 *
 * For VRAM ranges, the SVM BO must be allocated and valid (protected by its
 * eviction fence).
 *
 * The following sequence ensures race-free validation and GPU mapping:
 *
 * 1. Reserve page table (and SVM BO if range is in VRAM)
 * 2. hmm_range_fault to get page addresses (if system memory)
 * 3. DMA-map pages (if system memory)
 * 4-a. Take notifier lock
 * 4-b. Check that pages still valid (mmu_interval_read_retry)
 * 4-c. Check that the range was not split or otherwise invalidated
 * 4-d. Update GPU page table
 * 4-e. Release notifier lock
 * 5. Release page table (and SVM BO) reservation
 */
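
/* In svm_range_validate_and_map() below, step 1 corresponds to
 * svm_range_reserve_bos(), steps 2-3 to amdgpu_hmm_range_get_pages() and
 * svm_range_dma_map(), step 4 to the section between svm_range_lock() and
 * svm_range_unlock(), and step 5 to svm_range_unreserve_bos().
 */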
static int svm_range_validate_and_map(struct mm_struct *mm,
				      struct svm_range *prange,
				      int32_t gpuidx, bool intr, bool wait)
{
	struct svm_validate_context ctx;
	struct hmm_range *hmm_range;
	struct kfd_process *p;
	void *owner;
	int32_t idx;
	int r = 0;

	ctx.process = container_of(prange->svms, struct kfd_process, svms);
	ctx.prange = prange;
	ctx.intr = intr;

	if (gpuidx < MAX_GPU_INSTANCE) {
		bitmap_zero(ctx.bitmap, MAX_GPU_INSTANCE);
		bitmap_set(ctx.bitmap, gpuidx, 1);
	} else if (ctx.process->xnack_enabled) {
		bitmap_copy(ctx.bitmap, prange->bitmap_aip, MAX_GPU_INSTANCE);

		/* If prefetch range to GPU, or GPU retry fault migrate range to
		 * GPU, which has ACCESS attribute to the range, create mapping
		 * on that GPU.
		 */
		if (prange->actual_loc) {
			gpuidx = kfd_process_gpuidx_from_gpuid(ctx.process,
							prange->actual_loc);
			if (gpuidx < 0) {
				WARN_ONCE(1, "failed get device by id 0x%x\n",
					  prange->actual_loc);
				return -EINVAL;
			}
			if (test_bit(gpuidx, prange->bitmap_access))
				bitmap_set(ctx.bitmap, gpuidx, 1);
		}
	} else {
		bitmap_or(ctx.bitmap, prange->bitmap_access,
			  prange->bitmap_aip, MAX_GPU_INSTANCE);
	}

	if (bitmap_empty(ctx.bitmap, MAX_GPU_INSTANCE))
		return 0;

	if (prange->actual_loc && !prange->ttm_res) {
		/* This should never happen. actual_loc gets set by
		 * svm_migrate_ram_to_vram after allocating a BO.
		 */
		WARN(1, "VRAM BO missing during validation\n");
		return -EINVAL;
	}

	svm_range_reserve_bos(&ctx);

	p = container_of(prange->svms, struct kfd_process, svms);
	owner = kfd_svm_page_owner(p, find_first_bit(ctx.bitmap,
						MAX_GPU_INSTANCE));
	for_each_set_bit(idx, ctx.bitmap, MAX_GPU_INSTANCE) {
		if (kfd_svm_page_owner(p, idx) != owner) {
			owner = NULL;
			break;
		}
	}
	r = amdgpu_hmm_range_get_pages(&prange->notifier, mm, NULL,
				       prange->start << PAGE_SHIFT,
				       prange->npages, &hmm_range,
				       false, true, owner);
	if (r) {
		pr_debug("failed %d to get svm range pages\n", r);
		goto unreserve_out;
	}

	r = svm_range_dma_map(prange, ctx.bitmap,
			      hmm_range->hmm_pfns);
	if (r) {
		pr_debug("failed %d to dma map range\n", r);
		goto unreserve_out;
	}

	prange->validated_once = true;

	svm_range_lock(prange);
	if (amdgpu_hmm_range_get_pages_done(hmm_range)) {
		pr_debug("hmm update the range, need validate again\n");
		r = -EAGAIN;
		goto unlock_out;
	}
	if (!list_empty(&prange->child_list)) {
		pr_debug("range split by unmap in parallel, validate again\n");
		r = -EAGAIN;
		goto unlock_out;
	}

	r = svm_range_map_to_gpus(prange, ctx.bitmap, wait);

unlock_out:
	svm_range_unlock(prange);
unreserve_out:
	svm_range_unreserve_bos(&ctx);

	if (!r)
		prange->validate_timestamp = ktime_to_us(ktime_get());

	return r;
}

/**
 * svm_range_list_lock_and_flush_work - flush pending deferred work
 *
 * @svms: the svm range list
 * @mm: the mm structure
 *
 * Context: Returns with mmap write lock held, pending deferred work flushed
 */
static void
svm_range_list_lock_and_flush_work(struct svm_range_list *svms,
				   struct mm_struct *mm)
{
retry_flush_work:
	flush_work(&svms->deferred_list_work);
	mmap_write_lock(mm);

	if (list_empty(&svms->deferred_range_list))
		return;
	mmap_write_unlock(mm);
	pr_debug("retry flush\n");
	goto retry_flush_work;
}
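
/* The retry loop above is needed because new deferred work may be queued
 * between flush_work() and mmap_write_lock(); retrying with the lock dropped
 * guarantees the deferred list is empty once the write lock is finally held.
 */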
static void svm_range_restore_work(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct amdkfd_process_info *process_info;
	struct svm_range_list *svms;
	struct svm_range *prange;
	struct kfd_process *p;
	struct mm_struct *mm;
	int evicted_ranges;
	int invalid;
	int r;

	svms = container_of(dwork, struct svm_range_list, restore_work);
	evicted_ranges = atomic_read(&svms->evicted_ranges);
	if (!evicted_ranges)
		return;

	pr_debug("restore svm ranges\n");

	/* kfd_process_notifier_release destroys this worker thread. So during
	 * the lifetime of this thread, kfd_process and mm will be valid.
	 */
	p = container_of(svms, struct kfd_process, svms);
	process_info = p->kgd_process_info;
	mm = p->mm;

	mutex_lock(&process_info->lock);
	svm_range_list_lock_and_flush_work(svms, mm);
	mutex_lock(&svms->lock);

	evicted_ranges = atomic_read(&svms->evicted_ranges);

	list_for_each_entry(prange, &svms->list, list) {
		invalid = atomic_read(&prange->invalid);
		if (!invalid)
			continue;

		pr_debug("restoring svms 0x%p prange 0x%p [0x%lx %lx] inv %d\n",
			 prange->svms, prange, prange->start, prange->last,
			 invalid);

		/*
		 * If the range is migrating, wait for the migration to finish.
		 */
		mutex_lock(&prange->migrate_mutex);

		r = svm_range_validate_and_map(mm, prange, MAX_GPU_INSTANCE,
					       false, true);
		if (r)
			pr_debug("failed %d to map 0x%lx to gpus\n", r,
				 prange->start);

		mutex_unlock(&prange->migrate_mutex);
		if (r)
			goto out_reschedule;

		if (atomic_cmpxchg(&prange->invalid, invalid, 0) != invalid)
			goto out_reschedule;
	}

	if (atomic_cmpxchg(&svms->evicted_ranges, evicted_ranges, 0) !=
	    evicted_ranges)
		goto out_reschedule;

	evicted_ranges = 0;

	r = kgd2kfd_resume_mm(mm);
	if (r) {
		/* No recovery from this failure. Probably the CP is
		 * hanging. No point trying again.
		 */
		pr_debug("failed %d to resume KFD\n", r);
	}

	pr_debug("restore svm ranges successfully\n");

out_reschedule:
	mutex_unlock(&svms->lock);
	mmap_write_unlock(mm);
	mutex_unlock(&process_info->lock);

	/* If validation failed, reschedule another attempt */
	if (evicted_ranges) {
		pr_debug("reschedule to restore svm range\n");
		schedule_delayed_work(&svms->restore_work,
			msecs_to_jiffies(AMDGPU_SVM_RANGE_RESTORE_DELAY_MS));
	}
}

/**
 * svm_range_evict - evict svm range
 *
 * Stop all queues of the process to ensure GPU doesn't access the memory, then
 * return to let the CPU evict the buffer and proceed with the CPU page table
 * update.
 *
 * No lock is needed to sync the CPU page table invalidation with GPU execution.
 * If invalidation happens while restore work is running, restore work will
 * restart to ensure it picks up the latest CPU page mappings for the GPU, then
 * start restore work again.
 */
static int
svm_range_evict(struct svm_range *prange, struct mm_struct *mm,
		unsigned long start, unsigned long last)
{
	struct svm_range_list *svms = prange->svms;
	struct svm_range *pchild;
	struct kfd_process *p;
	int r = 0;

	p = container_of(svms, struct kfd_process, svms);

	pr_debug("invalidate svms 0x%p prange [0x%lx 0x%lx] [0x%lx 0x%lx]\n",
		 svms, prange->start, prange->last, start, last);

	if (!p->xnack_enabled) {
		int evicted_ranges;

		list_for_each_entry(pchild, &prange->child_list, child_list) {
			mutex_lock_nested(&pchild->lock, 1);
			if (pchild->start <= last && pchild->last >= start) {
				pr_debug("increment pchild invalid [0x%lx 0x%lx]\n",
					 pchild->start, pchild->last);
				atomic_inc(&pchild->invalid);
			}
			mutex_unlock(&pchild->lock);
		}

		if (prange->start <= last && prange->last >= start)
			atomic_inc(&prange->invalid);

		evicted_ranges = atomic_inc_return(&svms->evicted_ranges);
		if (evicted_ranges != 1)
			return r;

		pr_debug("evicting svms 0x%p range [0x%lx 0x%lx]\n",
			 prange->svms, prange->start, prange->last);

		/* First eviction, stop the queues */
		r = kgd2kfd_quiesce_mm(mm);
		if (r)
			pr_debug("failed to quiesce KFD\n");

		pr_debug("schedule to restore svm %p ranges\n", svms);
		schedule_delayed_work(&svms->restore_work,
			msecs_to_jiffies(AMDGPU_SVM_RANGE_RESTORE_DELAY_MS));
	} else {
		unsigned long s, l;

		pr_debug("invalidate unmap svms 0x%p [0x%lx 0x%lx] from GPUs\n",
			 prange->svms, start, last);
		list_for_each_entry(pchild, &prange->child_list, child_list) {
			mutex_lock_nested(&pchild->lock, 1);
			s = max(start, pchild->start);
			l = min(last, pchild->last);
			if (l >= s)
				svm_range_unmap_from_gpus(pchild, s, l);
			mutex_unlock(&pchild->lock);
		}
		s = max(start, prange->start);
		l = min(last, prange->last);
		if (l >= s)
			svm_range_unmap_from_gpus(prange, s, l);
	}

	return r;
}

static struct svm_range *svm_range_clone(struct svm_range *old)
{
	struct svm_range *new;

	new = svm_range_new(old->svms, old->start, old->last);
	if (!new)
		return NULL;

	if (old->svm_bo) {
		new->ttm_res = old->ttm_res;
		new->offset = old->offset;
		new->svm_bo = svm_range_bo_ref(old->svm_bo);
		spin_lock(&new->svm_bo->list_lock);
		list_add(&new->svm_bo_list, &new->svm_bo->range_list);
		spin_unlock(&new->svm_bo->list_lock);
	}
	new->flags = old->flags;
	new->preferred_loc = old->preferred_loc;
	new->prefetch_loc = old->prefetch_loc;
	new->actual_loc = old->actual_loc;
	new->granularity = old->granularity;
	bitmap_copy(new->bitmap_access, old->bitmap_access, MAX_GPU_INSTANCE);
	bitmap_copy(new->bitmap_aip, old->bitmap_aip, MAX_GPU_INSTANCE);

	return new;
}

/**
 * svm_range_handle_overlap - split overlap ranges
 * @svms: svm range list header
 * @new: range added with these attributes
 * @start: range added start address, in pages
 * @last: range last address, in pages
 * @update_list: output, the ranges attributes are updated. For set_attr, this
 *               will do validation and map to GPUs. For unmap, this will be
 *               removed and unmapped from GPUs
 * @insert_list: output, the ranges will be inserted into svms, attributes are
 *               not changed. For set_attr, this will add into svms.
 * @remove_list: output, the ranges will be removed from svms
 * @left: the remaining range after overlap, For set_attr, this will be added
 *        as a new range.
 *
 * There are 5 overlap cases in total.
 *
 * This function handles overlap of an address interval with existing
 * struct svm_ranges for applying new attributes. This may require
 * splitting existing struct svm_ranges. All changes should be applied to
 * the range_list and interval tree transactionally. If any split operation
 * fails, the entire update fails. Therefore the existing overlapping
 * svm_ranges are cloned and the original svm_ranges left unchanged. If the
 * transaction succeeds, the modified clones are added and the originals
 * freed. Otherwise the clones are removed and the old svm_ranges remain.
 *
 * Context: The caller must hold svms->lock
 */
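
/* The 5 cases, as handled below: an existing range extending before @start
 * (split off a head), one extending past @last (split off a tail), one fully
 * contained in [@start @last] (reused as-is), a gap before an existing range
 * (a new prange is inserted), and a remaining gap after the last overlapping
 * range (reported through @left).
 */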
static int
svm_range_handle_overlap(struct svm_range_list *svms, struct svm_range *new,
			 unsigned long start, unsigned long last,
			 struct list_head *update_list,
			 struct list_head *insert_list,
			 struct list_head *remove_list,
			 unsigned long *left)
{
	struct interval_tree_node *node;
	struct svm_range *prange;
	struct svm_range *tmp;
	int r = 0;

	INIT_LIST_HEAD(update_list);
	INIT_LIST_HEAD(insert_list);
	INIT_LIST_HEAD(remove_list);

	node = interval_tree_iter_first(&svms->objects, start, last);
	while (node) {
		struct interval_tree_node *next;
		struct svm_range *old;
		unsigned long next_start;

		pr_debug("found overlap node [0x%lx 0x%lx]\n", node->start,
			 node->last);

		old = container_of(node, struct svm_range, it_node);
		next = interval_tree_iter_next(node, start, last);
		next_start = min(node->last, last) + 1;

		if (node->start < start || node->last > last) {
			/* node intersects the updated range, clone+split it */
			prange = svm_range_clone(old);
			if (!prange) {
				r = -ENOMEM;
				goto out;
			}

			list_add(&old->remove_list, remove_list);
			list_add(&prange->insert_list, insert_list);

			if (node->start < start) {
				pr_debug("change old range start\n");
				r = svm_range_split_head(prange, new, start,
							 insert_list);
				if (r)
					goto out;
			}
			if (node->last > last) {
				pr_debug("change old range last\n");
				r = svm_range_split_tail(prange, new, last,
							 insert_list);
				if (r)
					goto out;
			}
		} else {
			/* The node is contained within start..last,
			 * original range is used
			 */
			prange = old;
		}

		if (!svm_range_is_same_attrs(prange, new))
			list_add(&prange->update_list, update_list);

		/* insert a new node if needed */
		if (node->start > start) {
			prange = svm_range_new(prange->svms, start,
					       node->start - 1);
			if (!prange) {
				r = -ENOMEM;
				goto out;
			}

			list_add(&prange->insert_list, insert_list);
			list_add(&prange->update_list, update_list);
		}

		node = next;
		start = next_start;
	}

	if (left && start <= last)
		*left = last - start + 1;

out:
	if (r)
		list_for_each_entry_safe(prange, tmp, insert_list, insert_list)
			svm_range_free(prange);

	return r;
}

static void
svm_range_update_notifier_and_interval_tree(struct mm_struct *mm,
					    struct svm_range *prange)
{
	unsigned long start;
	unsigned long last;

	start = prange->notifier.interval_tree.start >> PAGE_SHIFT;
	last = prange->notifier.interval_tree.last >> PAGE_SHIFT;

	if (prange->start == start && prange->last == last)
		return;

	pr_debug("up notifier 0x%p prange 0x%p [0x%lx 0x%lx] [0x%lx 0x%lx]\n",
		 prange->svms, prange, start, last, prange->start,
		 prange->last);

	if (start != 0 && last != 0) {
		interval_tree_remove(&prange->it_node, &prange->svms->objects);
		svm_range_remove_notifier(prange);
	}
	prange->it_node.start = prange->start;
	prange->it_node.last = prange->last;

	interval_tree_insert(&prange->it_node, &prange->svms->objects);
	svm_range_add_notifier_locked(mm, prange);
}

static void
svm_range_handle_list_op(struct svm_range_list *svms, struct svm_range *prange)
{
	struct mm_struct *mm = prange->work_item.mm;

	switch (prange->work_item.op) {
	case SVM_OP_NULL:
		pr_debug("NULL OP 0x%p prange 0x%p [0x%lx 0x%lx]\n",
			 svms, prange, prange->start, prange->last);
		break;
	case SVM_OP_UNMAP_RANGE:
		pr_debug("remove 0x%p prange 0x%p [0x%lx 0x%lx]\n",
			 svms, prange, prange->start, prange->last);
		svm_range_unlink(prange);
		svm_range_remove_notifier(prange);
		svm_range_free(prange);
		break;
	case SVM_OP_UPDATE_RANGE_NOTIFIER:
		pr_debug("update notifier 0x%p prange 0x%p [0x%lx 0x%lx]\n",
			 svms, prange, prange->start, prange->last);
		svm_range_update_notifier_and_interval_tree(mm, prange);
		break;
	case SVM_OP_UPDATE_RANGE_NOTIFIER_AND_MAP:
		pr_debug("update and map 0x%p prange 0x%p [0x%lx 0x%lx]\n",
			 svms, prange, prange->start, prange->last);
		svm_range_update_notifier_and_interval_tree(mm, prange);
		/* TODO: implement deferred validation and mapping */
		break;
	case SVM_OP_ADD_RANGE:
		pr_debug("add 0x%p prange 0x%p [0x%lx 0x%lx]\n", svms, prange,
			 prange->start, prange->last);
		svm_range_add_to_svms(prange);
		svm_range_add_notifier_locked(mm, prange);
		break;
	case SVM_OP_ADD_RANGE_AND_MAP:
		pr_debug("add and map 0x%p prange 0x%p [0x%lx 0x%lx]\n", svms,
			 prange, prange->start, prange->last);
		svm_range_add_to_svms(prange);
		svm_range_add_notifier_locked(mm, prange);
		/* TODO: implement deferred validation and mapping */
		break;
	default:
		WARN_ONCE(1, "Unknown prange 0x%p work op %d\n", prange,
			  prange->work_item.op);
	}
}

static void svm_range_drain_retry_fault(struct svm_range_list *svms)
{
	struct kfd_process_device *pdd;
	struct amdgpu_device *adev;
	struct kfd_process *p;
	uint32_t i;

	p = container_of(svms, struct kfd_process, svms);

	for_each_set_bit(i, svms->bitmap_supported, p->n_pdds) {
		pdd = p->pdds[i];
		if (!pdd)
			continue;

		pr_debug("drain retry fault gpu %d svms %p\n", i, svms);
		adev = (struct amdgpu_device *)pdd->dev->kgd;

		amdgpu_ih_wait_on_checkpoint_process(adev, &adev->irq.ih1);
		pr_debug("drain retry fault gpu %d svms 0x%p done\n", i, svms);
	}
}

static void svm_range_deferred_list_work(struct work_struct *work)
{
	struct svm_range_list *svms;
	struct svm_range *prange;
	struct mm_struct *mm;

	svms = container_of(work, struct svm_range_list, deferred_list_work);
	pr_debug("enter svms 0x%p\n", svms);

	spin_lock(&svms->deferred_list_lock);
	while (!list_empty(&svms->deferred_range_list)) {
		prange = list_first_entry(&svms->deferred_range_list,
					  struct svm_range, deferred_list);
		spin_unlock(&svms->deferred_list_lock);
		pr_debug("prange 0x%p [0x%lx 0x%lx] op %d\n", prange,
			 prange->start, prange->last, prange->work_item.op);

		/* Make sure no stale retry fault coming after range is freed */
		if (prange->work_item.op == SVM_OP_UNMAP_RANGE)
			svm_range_drain_retry_fault(prange->svms);

		mm = prange->work_item.mm;
		mmap_write_lock(mm);
		mutex_lock(&svms->lock);

		/* Remove from deferred_list must be inside mmap write lock,
		 * otherwise, svm_range_list_lock_and_flush_work may hold mmap
		 * write lock, and continue because deferred_list is empty, then
		 * deferred_list handle is blocked by mmap write lock.
		 */
		spin_lock(&svms->deferred_list_lock);
		list_del_init(&prange->deferred_list);
		spin_unlock(&svms->deferred_list_lock);

		mutex_lock(&prange->migrate_mutex);
		while (!list_empty(&prange->child_list)) {
			struct svm_range *pchild;

			pchild = list_first_entry(&prange->child_list,
						struct svm_range, child_list);
			pr_debug("child prange 0x%p op %d\n", pchild,
				 pchild->work_item.op);
			list_del_init(&pchild->child_list);
			svm_range_handle_list_op(svms, pchild);
		}
		mutex_unlock(&prange->migrate_mutex);

		svm_range_handle_list_op(svms, prange);
		mutex_unlock(&svms->lock);
		mmap_write_unlock(mm);

		spin_lock(&svms->deferred_list_lock);
	}
	spin_unlock(&svms->deferred_list_lock);

	pr_debug("exit svms 0x%p\n", svms);
}

void
svm_range_add_list_work(struct svm_range_list *svms, struct svm_range *prange,
			struct mm_struct *mm, enum svm_work_list_ops op)
{
	spin_lock(&svms->deferred_list_lock);
	/* if prange is on the deferred list */
	if (!list_empty(&prange->deferred_list)) {
		pr_debug("update exist prange 0x%p work op %d\n", prange, op);
		WARN_ONCE(prange->work_item.mm != mm, "unmatch mm\n");
		if (op != SVM_OP_NULL &&
		    prange->work_item.op != SVM_OP_UNMAP_RANGE)
			prange->work_item.op = op;
	} else {
		prange->work_item.op = op;
		prange->work_item.mm = mm;
		list_add_tail(&prange->deferred_list,
			      &prange->svms->deferred_range_list);
		pr_debug("add prange 0x%p [0x%lx 0x%lx] to work list op %d\n",
			 prange, prange->start, prange->last, op);
	}
	spin_unlock(&svms->deferred_list_lock);
}

void schedule_deferred_list_work(struct svm_range_list *svms)
{
	spin_lock(&svms->deferred_list_lock);
	if (!list_empty(&svms->deferred_range_list))
		schedule_work(&svms->deferred_list_work);
	spin_unlock(&svms->deferred_list_lock);
}

static void
svm_range_unmap_split(struct mm_struct *mm, struct svm_range *parent,
		      struct svm_range *prange, unsigned long start,
		      unsigned long last)
{
	struct svm_range *head;
	struct svm_range *tail;

	if (prange->work_item.op == SVM_OP_UNMAP_RANGE) {
		pr_debug("prange 0x%p [0x%lx 0x%lx] is already freed\n", prange,
			 prange->start, prange->last);
		return;
	}
	if (start > prange->last || last < prange->start)
		return;

	head = tail = prange;
	if (start > prange->start)
		svm_range_split(prange, prange->start, start - 1, &tail);
	if (last < tail->last)
		svm_range_split(tail, last + 1, tail->last, &head);

	if (head != prange && tail != prange) {
		svm_range_add_child(parent, mm, head, SVM_OP_UNMAP_RANGE);
		svm_range_add_child(parent, mm, tail, SVM_OP_ADD_RANGE);
	} else if (tail != prange) {
		svm_range_add_child(parent, mm, tail, SVM_OP_UNMAP_RANGE);
	} else if (head != prange) {
		svm_range_add_child(parent, mm, head, SVM_OP_UNMAP_RANGE);
	} else if (parent != prange) {
		prange->work_item.op = SVM_OP_UNMAP_RANGE;
	}
}
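
/* In svm_range_unmap_split() above, when the unmapped region [start last]
 * falls in the middle of prange, the first split leaves the front piece in
 * prange and the second split leaves the back piece in "tail"; the middle
 * piece ("head") becomes an SVM_OP_UNMAP_RANGE child and the back piece an
 * SVM_OP_ADD_RANGE child of @parent.
 */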
2056 svm_range_unmap_from_cpu(struct mm_struct *mm, struct svm_range *prange,
2057 unsigned long start, unsigned long last)
2059 struct svm_range_list *svms;
2060 struct svm_range *pchild;
2061 struct kfd_process *p;
2065 p = kfd_lookup_process_by_mm(mm);
2070 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] [0x%lx 0x%lx]\n", svms,
2071 prange, prange->start, prange->last, start, last);
2073 unmap_parent = start <= prange->start && last >= prange->last;
2075 list_for_each_entry(pchild, &prange->child_list, child_list) {
2076 mutex_lock_nested(&pchild->lock, 1);
2077 s = max(start, pchild->start);
2078 l = min(last, pchild->last);
2080 svm_range_unmap_from_gpus(pchild, s, l);
2081 svm_range_unmap_split(mm, prange, pchild, start, last);
2082 mutex_unlock(&pchild->lock);
2084 s = max(start, prange->start);
2085 l = min(last, prange->last);
2087 svm_range_unmap_from_gpus(prange, s, l);
2088 svm_range_unmap_split(mm, prange, prange, start, last);
2091 svm_range_add_list_work(svms, prange, mm, SVM_OP_UNMAP_RANGE);
2093 svm_range_add_list_work(svms, prange, mm,
2094 SVM_OP_UPDATE_RANGE_NOTIFIER);
2095 schedule_deferred_list_work(svms);
2097 kfd_unref_process(p);
2101 * svm_range_cpu_invalidate_pagetables - interval notifier callback
2103 * If event is MMU_NOTIFY_UNMAP, this is from CPU unmap range, otherwise, it
2104 * is from migration, or CPU page invalidation callback.
2106 * For unmap event, unmap range from GPUs, remove prange from svms in a delayed
2107 * work thread, and split prange if only part of prange is unmapped.
2109 * For invalidation event, if GPU retry fault is not enabled, evict the queues,
2110 * then schedule svm_range_restore_work to update GPU mapping and resume queues.
2111 * If GPU retry fault is enabled, unmap the svm range from GPU, retry fault will
2112 * update GPU mapping to recover.
2114 * Context: mmap lock, notifier_invalidate_start lock are held
2115 * for invalidate event, prange lock is held if this is from migration
2118 svm_range_cpu_invalidate_pagetables(struct mmu_interval_notifier *mni,
2119 const struct mmu_notifier_range *range,
2120 unsigned long cur_seq)
2122 struct svm_range *prange;
2123 unsigned long start;
2126 if (range->event == MMU_NOTIFY_RELEASE)
2129 start = mni->interval_tree.start;
2130 last = mni->interval_tree.last;
2131 start = (start > range->start ? start : range->start) >> PAGE_SHIFT;
2132 last = (last < (range->end - 1) ? last : range->end - 1) >> PAGE_SHIFT;
2133 pr_debug("[0x%lx 0x%lx] range[0x%lx 0x%lx] notifier[0x%lx 0x%lx] %d\n",
2134 start, last, range->start >> PAGE_SHIFT,
2135 (range->end - 1) >> PAGE_SHIFT,
2136 mni->interval_tree.start >> PAGE_SHIFT,
2137 mni->interval_tree.last >> PAGE_SHIFT, range->event);
2139 prange = container_of(mni, struct svm_range, notifier);
2141 svm_range_lock(prange);
2142 mmu_interval_set_seq(mni, cur_seq);
2144 switch (range->event) {
2145 case MMU_NOTIFY_UNMAP:
2146 svm_range_unmap_from_cpu(mni->mm, prange, start, last);
2149 svm_range_evict(prange, mni->mm, start, last);
2153 svm_range_unlock(prange);
2159 * svm_range_from_addr - find svm range from fault address
2160 * @svms: svm range list header
2161 * @addr: address to search range interval tree, in pages
2162 * @parent: parent range if range is on child list
2164 * Context: The caller must hold svms->lock
2166 * Return: the svm_range found or NULL
2169 svm_range_from_addr(struct svm_range_list *svms, unsigned long addr,
2170 struct svm_range **parent)
2172 struct interval_tree_node *node;
2173 struct svm_range *prange;
2174 struct svm_range *pchild;
2176 node = interval_tree_iter_first(&svms->objects, addr, addr);
2180 prange = container_of(node, struct svm_range, it_node);
2181 pr_debug("address 0x%lx prange [0x%lx 0x%lx] node [0x%lx 0x%lx]\n",
2182 addr, prange->start, prange->last, node->start, node->last);
2184 if (addr >= prange->start && addr <= prange->last) {
2185 if (parent)
2186 *parent = prange;
2187 return prange;
2188 }
2189 list_for_each_entry(pchild, &prange->child_list, child_list)
2190 if (addr >= pchild->start && addr <= pchild->last) {
2191 pr_debug("found address 0x%lx pchild [0x%lx 0x%lx]\n",
2192 addr, pchild->start, pchild->last);
2193 if (parent)
2194 *parent = prange;
2195 return pchild;
2196 }
2198 return NULL;
2199 }
2201 /* svm_range_best_restore_location - decide the best fault restore location
2202 * @prange: svm range structure
2203 * @adev: the GPU on which vm fault happened
2205 * This is only called when xnack is on, to decide the best location to restore
2206 * the range mapping after GPU vm fault. Caller uses the best location to do
2207 * migration if actual loc is not best location, then update GPU page table
2208 * mapping to the best location.
2210 * If vm fault gpu is range preferred loc, the best_loc is preferred loc.
2211 * If vm fault gpu idx is on range ACCESSIBLE bitmap, best_loc is vm fault gpu
2212 * If vm fault gpu idx is on range ACCESSIBLE_IN_PLACE bitmap, then
2213 * if range actual loc is cpu, best_loc is cpu
2214 * if vm fault gpu is on xgmi same hive of range actual loc gpu, best_loc is
2215 * range actual loc.
2216 * Otherwise, GPU no access, best_loc is -1.
2218 * Return:
2219 * -1 means vm fault GPU no access
2220 * 0 for CPU or GPU id
2221 */
2222 static int32_t
2223 svm_range_best_restore_location(struct svm_range *prange,
2224 struct amdgpu_device *adev,
2225 int32_t *gpuidx)
2226 {
2227 struct amdgpu_device *bo_adev;
2228 struct kfd_process *p;
2229 uint32_t gpuid;
2230 int r;
2232 p = container_of(prange->svms, struct kfd_process, svms);
2234 r = kfd_process_gpuid_from_kgd(p, adev, &gpuid, gpuidx);
2235 if (r < 0) {
2236 pr_debug("failed to get gpuid from kgd\n");
2237 return -1;
2238 }
2240 if (prange->preferred_loc == gpuid)
2241 return prange->preferred_loc;
2243 if (test_bit(*gpuidx, prange->bitmap_access))
2244 return gpuid;
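/* ACCESSIBLE_IN_PLACE: the faulting GPU may map the data where it already
 * resides, but only if it can actually reach it (system memory, or VRAM of
 * a GPU in the same xgmi hive).
 */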
2246 if (test_bit(*gpuidx, prange->bitmap_aip)) {
2247 if (!prange->actual_loc)
2248 return 0;
2250 bo_adev = svm_range_get_adev_by_id(prange, prange->actual_loc);
2251 if (amdgpu_xgmi_same_hive(adev, bo_adev))
2252 return prange->actual_loc;
2253 else
2254 return 0;
2255 }
2257 return -1;
2258 }
2259 static int
2260 svm_range_get_range_boundaries(struct kfd_process *p, int64_t addr,
2261 unsigned long *start, unsigned long *last)
2263 struct vm_area_struct *vma;
2264 struct interval_tree_node *node;
2265 unsigned long start_limit, end_limit;
2267 vma = find_vma(p->mm, addr << PAGE_SHIFT);
2268 if (!vma || (addr << PAGE_SHIFT) < vma->vm_start) {
2269 pr_debug("VMA does not exist in address [0x%llx]\n", addr);
2270 return -EFAULT;
2271 }
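/* 2UL << 8 is 512 pages, i.e. a 2 MiB aligned window (with 4 KiB pages)
 * around the fault address; the window is further clipped by the VMA here
 * and by neighbouring registered ranges below.
 */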
2272 start_limit = max(vma->vm_start >> PAGE_SHIFT,
2273 (unsigned long)ALIGN_DOWN(addr, 2UL << 8));
2274 end_limit = min(vma->vm_end >> PAGE_SHIFT,
2275 (unsigned long)ALIGN(addr + 1, 2UL << 8));
2276 /* First range that starts after the fault address */
2277 node = interval_tree_iter_first(&p->svms.objects, addr + 1, ULONG_MAX);
2278 if (node) {
2279 end_limit = min(end_limit, node->start);
2280 /* Last range that ends before the fault address */
2281 node = container_of(rb_prev(&node->rb),
2282 struct interval_tree_node, rb);
2283 } else {
2284 /* Last range must end before addr because
2285 * there was no range after addr
2286 */
2287 node = container_of(rb_last(&p->svms.objects.rb_root),
2288 struct interval_tree_node, rb);
2289 }
2290 if (node) {
2291 if (node->last >= addr) {
2292 WARN(1, "Overlap with prev node and page fault addr\n");
2293 return -EFAULT;
2294 }
2295 start_limit = max(start_limit, node->last + 1);
2296 }
2298 *start = start_limit;
2299 *last = end_limit - 1;
2301 pr_debug("vma start: 0x%lx start: 0x%lx vma end: 0x%lx last: 0x%lx\n",
2302 vma->vm_start >> PAGE_SHIFT, *start,
2303 vma->vm_end >> PAGE_SHIFT, *last);
2308 static struct
2309 svm_range *svm_range_create_unregistered_range(struct amdgpu_device *adev,
2310 struct kfd_process *p,
2311 struct mm_struct *mm,
2312 int64_t addr)
2313 {
2314 struct svm_range *prange = NULL;
2315 unsigned long start, last;
2316 uint32_t gpuid, gpuidx;
2318 if (svm_range_get_range_boundaries(p, addr, &start, &last))
2319 return NULL;
2321 prange = svm_range_new(&p->svms, start, last);
2322 if (!prange) {
2323 pr_debug("Failed to create prange in address [0x%llx]\n", addr);
2324 return NULL;
2325 }
2326 if (kfd_process_gpuid_from_kgd(p, adev, &gpuid, &gpuidx)) {
2327 pr_debug("failed to get gpuid from kgd\n");
2328 svm_range_free(prange);
2329 return NULL;
2330 }
2332 svm_range_add_to_svms(prange);
2333 svm_range_add_notifier_locked(mm, prange);
2335 return prange;
2336 }
2338 /* svm_range_skip_recover - decide if prange can be recovered
2339 * @prange: svm range structure
2341 * The GPU vm retry fault handler skips recovering the range in these cases:
2342 * 1. prange is on deferred list to be removed after unmap; it is a stale fault,
2343 * and the deferred list work will drain the stale fault before freeing the prange.
2344 * 2. prange is on deferred list to add interval notifier after split, or
2345 * 3. prange is child range, it is split from parent prange, recover later
2346 * after interval notifier is added.
2348 * Return: true to skip recover, false to recover
2350 static bool svm_range_skip_recover(struct svm_range *prange)
2352 struct svm_range_list *svms = prange->svms;
2354 spin_lock(&svms->deferred_list_lock);
2355 if (list_empty(&prange->deferred_list) &&
2356 list_empty(&prange->child_list)) {
2357 spin_unlock(&svms->deferred_list_lock);
2358 return false;
2359 }
2360 spin_unlock(&svms->deferred_list_lock);
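/* Something is pending on the deferred or child lists; check which deferred
 * operation it is to decide whether this fault is stale.
 */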
2362 if (prange->work_item.op == SVM_OP_UNMAP_RANGE) {
2363 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] unmapped\n",
2364 svms, prange, prange->start, prange->last);
2365 return true;
2366 }
2367 if (prange->work_item.op == SVM_OP_ADD_RANGE_AND_MAP ||
2368 prange->work_item.op == SVM_OP_ADD_RANGE) {
2369 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] not added yet\n",
2370 svms, prange, prange->start, prange->last);
2371 return true;
2372 }
2374 return false;
2375 }
2376 static void
2377 svm_range_count_fault(struct amdgpu_device *adev, struct kfd_process *p,
2378 int32_t gpuidx)
2379 {
2380 struct kfd_process_device *pdd;
2382 /* fault is on different page of same range
2383 * or fault is skipped to recover later
2384 * or fault is on invalid virtual address
2386 if (gpuidx == MAX_GPU_INSTANCE) {
2387 uint32_t gpuid;
2388 int r;
2390 r = kfd_process_gpuid_from_kgd(p, adev, &gpuid, &gpuidx);
2391 if (r < 0)
2392 return;
2393 }
2395 /* fault is recovered
2396 * or fault cannot recover because GPU no access on the range
2398 pdd = kfd_process_device_from_gpuidx(p, gpuidx);
2399 if (pdd)
2400 WRITE_ONCE(pdd->faults, pdd->faults + 1);
2401 }
2403 int
2404 svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
2405 uint64_t addr)
2406 {
2407 struct mm_struct *mm = NULL;
2408 struct svm_range_list *svms;
2409 struct svm_range *prange;
2410 struct kfd_process *p;
2411 uint64_t timestamp;
2412 int32_t best_loc;
2413 int32_t gpuidx = MAX_GPU_INSTANCE;
2414 bool write_locked = false;
2415 int r = 0;
2417 if (!KFD_IS_SVM_API_SUPPORTED(adev->kfd.dev)) {
2418 pr_debug("device does not support SVM\n");
2419 return -EFAULT;
2420 }
2422 p = kfd_lookup_process_by_pasid(pasid);
2423 if (!p) {
2424 pr_debug("kfd process not found pasid 0x%x\n", pasid);
2425 return -ESRCH;
2426 }
2427 if (!p->xnack_enabled) {
2428 pr_debug("XNACK not enabled for pasid 0x%x\n", pasid);
2429 r = -EFAULT;
2430 goto out;
2431 }
2432 svms = &p->svms;
2433 pr_debug("restoring svms 0x%p fault address 0x%llx\n", svms, addr);
2435 mm = get_task_mm(p->lead_thread);
2436 if (!mm) {
2437 pr_debug("svms 0x%p failed to get mm\n", svms);
2438 r = -ESRCH;
2439 goto out;
2440 }
2442 mmap_read_lock(mm);
2443 retry_write_locked:
2444 mutex_lock(&svms->lock);
2445 prange = svm_range_from_addr(svms, addr, NULL);
2446 if (!prange) {
2447 pr_debug("failed to find prange svms 0x%p address [0x%llx]\n",
2448 svms, addr);
2449 if (!write_locked) {
2450 /* Need the write lock to create new range with MMU notifier.
2451 * Also flush pending deferred work to make sure the interval
2452 * tree is up to date before we add a new range
2454 mutex_unlock(&svms->lock);
2455 mmap_read_unlock(mm);
2456 mmap_write_lock(mm);
2457 write_locked = true;
2458 goto retry_write_locked;
2460 prange = svm_range_create_unregistered_range(adev, p, mm, addr);
2461 if (!prange) {
2462 pr_debug("failed to create unregistered range svms 0x%p address [0x%llx]\n",
2463 svms, addr);
2464 mmap_write_downgrade(mm);
2465 r = -EFAULT;
2466 goto out_unlock_svms;
2467 }
2468 }
2469 if (write_locked)
2470 mmap_write_downgrade(mm);
2472 mutex_lock(&prange->migrate_mutex);
2474 if (svm_range_skip_recover(prange)) {
2475 amdgpu_gmc_filter_faults_remove(adev, addr, pasid);
2476 goto out_unlock_range;
2479 timestamp = ktime_to_us(ktime_get()) - prange->validate_timestamp;
2480 /* skip duplicate vm fault on different pages of same range */
2481 if (timestamp < AMDGPU_SVM_RANGE_RETRY_FAULT_PENDING) {
2482 pr_debug("svms 0x%p [0x%lx %lx] already restored\n",
2483 svms, prange->start, prange->last);
2484 goto out_unlock_range;
2487 best_loc = svm_range_best_restore_location(prange, adev, &gpuidx);
2488 if (best_loc == -1) {
2489 pr_debug("svms %p failed get best restore loc [0x%lx 0x%lx]\n",
2490 svms, prange->start, prange->last);
2491 r = -EACCES;
2492 goto out_unlock_range;
2493 }
2495 pr_debug("svms %p [0x%lx 0x%lx] best restore 0x%x, actual loc 0x%x\n",
2496 svms, prange->start, prange->last, best_loc,
2497 prange->actual_loc);
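/* If the data is not already at the chosen restore location, migrate it
 * there first (to VRAM, or back to system memory on failure), then map the
 * range on the faulting GPU below.
 */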
2499 if (prange->actual_loc != best_loc) {
2500 if (best_loc) {
2501 r = svm_migrate_to_vram(prange, best_loc, mm);
2502 if (r) {
2503 pr_debug("svm_migrate_to_vram failed (%d) at %llx, falling back to system memory\n",
2504 r, addr);
2505 /* Fallback to system memory if migration to
2506 * VRAM failed
2507 */
2508 if (prange->actual_loc)
2509 r = svm_migrate_vram_to_ram(prange, mm);
2510 else
2511 r = 0;
2512 }
2513 } else {
2514 r = svm_migrate_vram_to_ram(prange, mm);
2515 }
2516 if (r) {
2517 pr_debug("failed %d to migrate svms %p [0x%lx 0x%lx]\n",
2518 r, svms, prange->start, prange->last);
2519 goto out_unlock_range;
2520 }
2521 }
2523 r = svm_range_validate_and_map(mm, prange, gpuidx, false, false);
2524 if (r)
2525 pr_debug("failed %d to map svms 0x%p [0x%lx 0x%lx] to gpus\n",
2526 r, svms, prange->start, prange->last);
2528 out_unlock_range:
2529 mutex_unlock(&prange->migrate_mutex);
2530 out_unlock_svms:
2531 mutex_unlock(&svms->lock);
2532 mmap_read_unlock(mm);
2534 svm_range_count_fault(adev, p, gpuidx);
2536 mmput(mm);
2537 out:
2538 kfd_unref_process(p);
2540 if (r == -EAGAIN) {
2541 pr_debug("recover vm fault later\n");
2542 amdgpu_gmc_filter_faults_remove(adev, addr, pasid);
2543 r = 0;
2544 }
2545 return r;
2546 }
2548 void svm_range_list_fini(struct kfd_process *p)
2550 struct svm_range *prange;
2551 struct svm_range *next;
2553 pr_debug("pasid 0x%x svms 0x%p\n", p->pasid, &p->svms);
2555 /* Ensure list work is finished before process is destroyed */
2556 flush_work(&p->svms.deferred_list_work);
2558 list_for_each_entry_safe(prange, next, &p->svms.list, list) {
2559 svm_range_unlink(prange);
2560 svm_range_remove_notifier(prange);
2561 svm_range_free(prange);
2564 mutex_destroy(&p->svms.lock);
2566 pr_debug("pasid 0x%x svms 0x%p done\n", p->pasid, &p->svms);
2569 int svm_range_list_init(struct kfd_process *p)
2571 struct svm_range_list *svms = &p->svms;
2574 svms->objects = RB_ROOT_CACHED;
2575 mutex_init(&svms->lock);
2576 INIT_LIST_HEAD(&svms->list);
2577 atomic_set(&svms->evicted_ranges, 0);
2578 INIT_DELAYED_WORK(&svms->restore_work, svm_range_restore_work);
2579 INIT_WORK(&svms->deferred_list_work, svm_range_deferred_list_work);
2580 INIT_LIST_HEAD(&svms->deferred_range_list);
2581 spin_lock_init(&svms->deferred_list_lock);
2583 for (i = 0; i < p->n_pdds; i++)
2584 if (KFD_IS_SVM_API_SUPPORTED(p->pdds[i]->dev))
2585 bitmap_set(svms->bitmap_supported, i, 1);
2591 * svm_range_is_valid - check if virtual address range is valid
2592 * @mm: current process mm_struct
2593 * @start: range start address, in pages
2594 * @size: range size, in pages
2596 * Valid virtual address range means it belongs to one or more VMAs
2598 * Context: Process context
2601 * true - valid svm range
2602 * false - invalid svm range
2604 static bool
2605 svm_range_is_valid(struct mm_struct *mm, uint64_t start, uint64_t size)
2606 {
2607 const unsigned long device_vma = VM_IO | VM_PFNMAP | VM_MIXEDMAP;
2608 struct vm_area_struct *vma;
2609 unsigned long end;
2611 start <<= PAGE_SHIFT;
2612 end = start + (size << PAGE_SHIFT);
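/* Walk the address range VMA by VMA; a gap between VMAs or a device VMA
 * (VM_IO/VM_PFNMAP/VM_MIXEDMAP) makes the range invalid.
 */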
2614 do {
2615 vma = find_vma(mm, start);
2616 if (!vma || start < vma->vm_start ||
2617 (vma->vm_flags & device_vma))
2618 return false;
2619 start = min(end, vma->vm_end);
2620 } while (start < end);
2622 return true;
2623 }
2626 * svm_range_add - add svm range and handle overlap
2627 * @p: the range add to this process svms
2628 * @start: page size aligned
2629 * @size: page size aligned
2630 * @nattr: number of attributes
2631 * @attrs: array of attributes
2632 * @update_list: output, the ranges need validate and update GPU mapping
2633 * @insert_list: output, the ranges need insert to svms
2634 * @remove_list: output, the ranges are replaced and need remove from svms
2636 * Check if the virtual address range has overlap with the registered ranges,
2637 * split the overlapped range, copy and adjust pages address and vram nodes in
2638 * old and new ranges.
2640 * Context: Process context, caller must hold svms->lock
2643 * 0 - OK, otherwise error code
2646 svm_range_add(struct kfd_process *p, uint64_t start, uint64_t size,
2647 uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs,
2648 struct list_head *update_list, struct list_head *insert_list,
2649 struct list_head *remove_list)
2651 uint64_t last = start + size - 1UL;
2652 struct svm_range_list *svms;
2653 struct svm_range new = {0};
2654 struct svm_range *prange;
2655 unsigned long left = 0;
2658 pr_debug("svms 0x%p [0x%llx 0x%llx]\n", &p->svms, start, last);
2660 svm_range_apply_attrs(p, &new, nattr, attrs);
2662 svms = &p->svms;
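/* Split and adjust any registered ranges overlapping [start, last];
 * "left" returns the number of trailing pages not covered by an existing
 * range, which get a brand new range below.
 */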
2664 r = svm_range_handle_overlap(svms, &new, start, last, update_list,
2665 insert_list, remove_list, &left);
2666 if (r)
2667 return r;
2669 if (left) {
2670 prange = svm_range_new(svms, last - left + 1, last);
2671 list_add(&prange->insert_list, insert_list);
2672 list_add(&prange->update_list, update_list);
2673 }
2675 return 0;
2676 }
2678 /* svm_range_best_prefetch_location - decide the best prefetch location
2679 * @prange: svm range structure
2681 * For xnack off:
2682 * If the range maps to a single GPU, the best actual location is the prefetch
2683 * location, which can be CPU or GPU.
2685 * If the range maps to multiple GPUs, the best actual location can be the
2686 * prefetch_loc GPU only if the mGPU connection is xgmi same hive. If the mGPU
2687 * connection is PCIe, the best actual location is always CPU, because a GPU
2688 * cannot access vram of other GPUs, assuming PCIe small bar (large bar support is not upstream).
2690 * For xnack on:
2691 * The best actual location is the prefetch location. If the mGPU connection is
2692 * xgmi same hive, the range maps to multiple GPUs. Otherwise, the range only maps
2693 * to the actual location GPU; access from another GPU will vm fault and trigger migration.
2695 * Context: Process context
2698 * 0 for CPU or GPU id
2701 svm_range_best_prefetch_location(struct svm_range *prange)
2703 DECLARE_BITMAP(bitmap, MAX_GPU_INSTANCE);
2704 uint32_t best_loc = prange->prefetch_loc;
2705 struct kfd_process_device *pdd;
2706 struct amdgpu_device *bo_adev;
2707 struct amdgpu_device *adev;
2708 struct kfd_process *p;
2711 p = container_of(prange->svms, struct kfd_process, svms);
2714 if (p->xnack_enabled)
2715 goto out;
2718 if (!best_loc || best_loc == KFD_IOCTL_SVM_LOCATION_UNDEFINED)
2719 goto out;
2721 bo_adev = svm_range_get_adev_by_id(prange, best_loc);
2722 if (!bo_adev) {
2723 WARN_ONCE(1, "failed to get device by id 0x%x\n", best_loc);
2724 best_loc = 0;
2725 goto out;
2726 }
2727 bitmap_or(bitmap, prange->bitmap_access, prange->bitmap_aip,
2728 MAX_GPU_INSTANCE);
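/* The prefetch GPU can only be used if every GPU that needs access
 * (ACCESS or ACCESS_IN_PLACE) is either that GPU itself or in the same
 * xgmi hive; otherwise fall back to CPU (best_loc = 0).
 */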
2730 for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) {
2731 pdd = kfd_process_device_from_gpuidx(p, gpuidx);
2732 if (!pdd) {
2733 pr_debug("failed to get device by idx 0x%x\n", gpuidx);
2734 continue;
2735 }
2736 adev = (struct amdgpu_device *)pdd->dev->kgd;
2738 if (adev == bo_adev)
2739 continue;
2741 if (!amdgpu_xgmi_same_hive(adev, bo_adev)) {
2742 best_loc = 0;
2743 break;
2744 }
2745 }
2747 out:
2748 pr_debug("xnack %d svms 0x%p [0x%lx 0x%lx] best loc 0x%x\n",
2749 p->xnack_enabled, &p->svms, prange->start, prange->last,
2750 best_loc);
2752 return best_loc;
2753 }
2755 /* FIXME: This is a workaround for page locking bug when some pages are
2756 * invalid during migration to VRAM
2758 void svm_range_prefault(struct svm_range *prange, struct mm_struct *mm,
2761 struct hmm_range *hmm_range;
2764 if (prange->validated_once)
2767 r = amdgpu_hmm_range_get_pages(&prange->notifier, mm, NULL,
2768 prange->start << PAGE_SHIFT,
2769 prange->npages, &hmm_range,
2770 false, true, owner);
2771 if (!r) {
2772 amdgpu_hmm_range_get_pages_done(hmm_range);
2773 prange->validated_once = true;
2774 }
2775 }
2777 /* svm_range_trigger_migration - start page migration if prefetch loc changed
2778 * @mm: current process mm_struct
2779 * @prange: svm range structure
2780 * @migrated: output, true if migration is triggered
2782 * If range prefetch_loc is a GPU and actual loc is CPU (0), then migrate the
2783 * range to that GPU.
2784 * If range prefetch_loc is CPU (0) and actual loc is a GPU, then migrate the
2785 * range back to system memory.
2787 * If GPU vm fault retry is not enabled, migration interacts with the MMU
2788 * notifier callback:
2789 * 1. migrate_vma_setup invalidates pages, MMU notifier callback svm_range_evict
2790 * stops all queues, schedules restore work
2791 * 2. svm_range_restore_work waits for migration to finish via
2792 * a. svm_range_validate_vram taking prange->migrate_mutex
2793 * b. svm_range_validate_ram HMM get pages waiting for the CPU fault handler to return
2794 * 3. restore work updates GPU mappings and resumes all queues.
2796 * Context: Process context
2799 * 0 - OK, otherwise - error code of migration
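*
* Note: callers use @migrated to decide whether to map immediately; with
* xnack off, svm_range_set_attr skips the immediate map after a migration
* and lets the restore worker update the GPU mappings instead.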
2801 static int
2802 svm_range_trigger_migration(struct mm_struct *mm, struct svm_range *prange,
2803 bool *migrated)
2804 {
2805 uint32_t best_loc;
2806 int r = 0;
2808 *migrated = false;
2809 best_loc = svm_range_best_prefetch_location(prange);
2811 if (best_loc == KFD_IOCTL_SVM_LOCATION_UNDEFINED ||
2812 best_loc == prange->actual_loc)
2813 return 0;
2815 if (!best_loc) {
2816 r = svm_migrate_vram_to_ram(prange, mm);
2817 *migrated = !r;
2818 return r;
2819 }
2821 r = svm_migrate_to_vram(prange, best_loc, mm);
2822 *migrated = !r;
2824 return r;
2825 }
2827 int svm_range_schedule_evict_svm_bo(struct amdgpu_amdkfd_fence *fence)
2828 {
2829 if (!fence)
2830 return -EINVAL;
2832 if (dma_fence_is_signaled(&fence->base))
2833 return 0;
2835 if (fence->svm_bo) {
2836 WRITE_ONCE(fence->svm_bo->evicting, 1);
2837 schedule_work(&fence->svm_bo->eviction_work);
2838 }
2840 return 0;
2841 }
2843 static void svm_range_evict_svm_bo_worker(struct work_struct *work)
2845 struct svm_range_bo *svm_bo;
2846 struct kfd_process *p;
2847 struct mm_struct *mm;
2849 svm_bo = container_of(work, struct svm_range_bo, eviction_work);
2850 if (!svm_bo_ref_unless_zero(svm_bo))
2851 return; /* svm_bo was freed while eviction was pending */
2853 /* svm_range_bo_release destroys this worker thread. So during
2854 * the lifetime of this thread, kfd_process and mm will be valid.
2856 p = container_of(svm_bo->svms, struct kfd_process, svms);
2857 mm = p->mm;
2859 mmap_read_lock(mm);
2862 spin_lock(&svm_bo->list_lock);
2863 while (!list_empty(&svm_bo->range_list)) {
2864 struct svm_range *prange =
2865 list_first_entry(&svm_bo->range_list,
2866 struct svm_range, svm_bo_list);
2867 list_del_init(&prange->svm_bo_list);
2868 spin_unlock(&svm_bo->list_lock);
2870 pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms,
2871 prange->start, prange->last);
2873 mutex_lock(&prange->migrate_mutex);
2874 svm_migrate_vram_to_ram(prange, svm_bo->eviction_fence->mm);
2876 mutex_lock(&prange->lock);
2877 prange->svm_bo = NULL;
2878 mutex_unlock(&prange->lock);
2880 mutex_unlock(&prange->migrate_mutex);
2882 spin_lock(&svm_bo->list_lock);
2883 }
2884 spin_unlock(&svm_bo->list_lock);
2885 mmap_read_unlock(mm);
2887 dma_fence_signal(&svm_bo->eviction_fence->base);
2888 /* This is the last reference to svm_bo, after svm_range_vram_node_free
2889 * has been called in svm_migrate_vram_to_ram
2891 WARN_ONCE(kref_read(&svm_bo->kref) != 1, "This was not the last reference\n");
2892 svm_range_bo_unref(svm_bo);
2893 }
2895 static int
2896 svm_range_set_attr(struct kfd_process *p, uint64_t start, uint64_t size,
2897 uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs)
2899 struct amdkfd_process_info *process_info = p->kgd_process_info;
2900 struct mm_struct *mm = current->mm;
2901 struct list_head update_list;
2902 struct list_head insert_list;
2903 struct list_head remove_list;
2904 struct svm_range_list *svms;
2905 struct svm_range *prange;
2906 struct svm_range *next;
2909 pr_debug("pasid 0x%x svms 0x%p [0x%llx 0x%llx] pages 0x%llx\n",
2910 p->pasid, &p->svms, start, start + size - 1, size);
2912 r = svm_range_check_attr(p, nattr, attrs);
2913 if (r)
2914 return r;
2916 svms = &p->svms;
2918 mutex_lock(&process_info->lock);
2920 svm_range_list_lock_and_flush_work(svms, mm);
2922 if (!svm_range_is_valid(mm, start, size)) {
2923 pr_debug("invalid range\n");
2924 r = -EFAULT;
2925 mmap_write_unlock(mm);
2926 goto out;
2927 }
2929 mutex_lock(&svms->lock);
2931 /* Add new range and split existing ranges as needed */
2932 r = svm_range_add(p, start, size, nattr, attrs, &update_list,
2933 &insert_list, &remove_list);
2934 if (r) {
2935 mutex_unlock(&svms->lock);
2936 mmap_write_unlock(mm);
2937 goto out;
2938 }
2939 /* Apply changes as a transaction */
2940 list_for_each_entry_safe(prange, next, &insert_list, insert_list) {
2941 svm_range_add_to_svms(prange);
2942 svm_range_add_notifier_locked(mm, prange);
2943 }
2944 list_for_each_entry(prange, &update_list, update_list) {
2945 svm_range_apply_attrs(p, prange, nattr, attrs);
2946 /* TODO: unmap ranges from GPU that lost access */
2947 }
2948 list_for_each_entry_safe(prange, next, &remove_list,
2949 remove_list) {
2950 pr_debug("unlink old 0x%p prange 0x%p [0x%lx 0x%lx]\n",
2951 prange->svms, prange, prange->start,
2952 prange->last);
2953 svm_range_unlink(prange);
2954 svm_range_remove_notifier(prange);
2955 svm_range_free(prange);
2956 }
2958 mmap_write_downgrade(mm);
2959 /* Trigger migrations and revalidate and map to GPUs as needed. If
2960 * this fails we may be left with partially completed actions. There
2961 * is no clean way of rolling back to the previous state in such a
2962 * case because the rollback wouldn't be guaranteed to work either.
2964 list_for_each_entry(prange, &update_list, update_list) {
2965 bool migrated;
2967 mutex_lock(&prange->migrate_mutex);
2969 r = svm_range_trigger_migration(mm, prange, &migrated);
2970 if (r)
2971 goto out_unlock_range;
2973 if (migrated && !p->xnack_enabled) {
2974 pr_debug("restore_work will update mappings of GPUs\n");
2975 mutex_unlock(&prange->migrate_mutex);
2976 continue;
2977 }
2979 r = svm_range_validate_and_map(mm, prange, MAX_GPU_INSTANCE,
2980 true, true);
2981 if (r)
2982 pr_debug("failed %d to map svm range\n", r);
2984 out_unlock_range:
2985 mutex_unlock(&prange->migrate_mutex);
2986 if (r)
2987 break;
2988 }
2990 svm_range_debug_dump(svms);
2992 mutex_unlock(&svms->lock);
2993 mmap_read_unlock(mm);
2994 out:
2995 mutex_unlock(&process_info->lock);
2997 pr_debug("pasid 0x%x svms 0x%p [0x%llx 0x%llx] done, r=%d\n", p->pasid,
2998 &p->svms, start, start + size - 1, r);
3000 return r;
3001 }
3003 static int
3004 svm_range_get_attr(struct kfd_process *p, uint64_t start, uint64_t size,
3005 uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs)
3007 DECLARE_BITMAP(bitmap_access, MAX_GPU_INSTANCE);
3008 DECLARE_BITMAP(bitmap_aip, MAX_GPU_INSTANCE);
3009 bool get_preferred_loc = false;
3010 bool get_prefetch_loc = false;
3011 bool get_granularity = false;
3012 bool get_accessible = false;
3013 bool get_flags = false;
3014 uint64_t last = start + size - 1UL;
3015 struct mm_struct *mm = current->mm;
3016 uint8_t granularity = 0xff;
3017 struct interval_tree_node *node;
3018 struct svm_range_list *svms;
3019 struct svm_range *prange;
3020 uint32_t prefetch_loc = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
3021 uint32_t location = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
3022 uint32_t flags = 0xffffffff;
3026 pr_debug("svms 0x%p [0x%llx 0x%llx] nattr 0x%x\n", &p->svms, start,
3027 start + size - 1, nattr);
3029 /* Flush pending deferred work to avoid racing with deferred actions from
3030 * previous memory map changes (e.g. munmap). Concurrent memory map changes
3031 * can still race with get_attr because we don't hold the mmap lock. But that
3032 * would be a race condition in the application anyway, and undefined
3033 * behaviour is acceptable in that case.
3035 flush_work(&p->svms.deferred_list_work);
3037 mmap_read_lock(mm);
3038 if (!svm_range_is_valid(mm, start, size)) {
3039 pr_debug("invalid range\n");
3040 mmap_read_unlock(mm);
3041 return -EINVAL;
3042 }
3043 mmap_read_unlock(mm);
3045 for (i = 0; i < nattr; i++) {
3046 switch (attrs[i].type) {
3047 case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
3048 get_preferred_loc = true;
3049 break;
3050 case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
3051 get_prefetch_loc = true;
3052 break;
3053 case KFD_IOCTL_SVM_ATTR_ACCESS:
3054 get_accessible = true;
3055 break;
3056 case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
3057 get_flags = true;
3058 break;
3059 case KFD_IOCTL_SVM_ATTR_GRANULARITY:
3060 get_granularity = true;
3061 break;
3062 case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
3063 case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE:
3064 case KFD_IOCTL_SVM_ATTR_NO_ACCESS:
3065 fallthrough;
3066 default:
3067 pr_debug("get invalid attr type 0x%x\n", attrs[i].type);
3068 return -EINVAL;
3069 }
3070 }
3072 svms = &p->svms;
3074 mutex_lock(&svms->lock);
3076 node = interval_tree_iter_first(&svms->objects, start, last);
3077 if (!node) {
3078 pr_debug("range attrs not found return default values\n");
3079 svm_range_set_default_attributes(&location, &prefetch_loc,
3080 &granularity, &flags);
3081 if (p->xnack_enabled)
3082 bitmap_copy(bitmap_access, svms->bitmap_supported,
3083 MAX_GPU_INSTANCE);
3084 else
3085 bitmap_zero(bitmap_access, MAX_GPU_INSTANCE);
3086 bitmap_zero(bitmap_aip, MAX_GPU_INSTANCE);
3087 goto fill_values;
3088 }
3089 bitmap_copy(bitmap_access, svms->bitmap_supported, MAX_GPU_INSTANCE);
3090 bitmap_copy(bitmap_aip, svms->bitmap_supported, MAX_GPU_INSTANCE);
3092 while (node) {
3093 struct interval_tree_node *next;
3095 prange = container_of(node, struct svm_range, it_node);
3096 next = interval_tree_iter_next(node, start, last);
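/* Report an attribute only if all ranges in [start, last] agree on it:
 * locations collapse to UNDEFINED on mismatch, access bitmaps and flags
 * are AND-ed, and the smallest granularity wins.
 */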
3098 if (get_preferred_loc) {
3099 if (prange->preferred_loc ==
3100 KFD_IOCTL_SVM_LOCATION_UNDEFINED ||
3101 (location != KFD_IOCTL_SVM_LOCATION_UNDEFINED &&
3102 location != prange->preferred_loc)) {
3103 location = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
3104 get_preferred_loc = false;
3105 } else {
3106 location = prange->preferred_loc;
3107 }
3109 if (get_prefetch_loc) {
3110 if (prange->prefetch_loc ==
3111 KFD_IOCTL_SVM_LOCATION_UNDEFINED ||
3112 (prefetch_loc != KFD_IOCTL_SVM_LOCATION_UNDEFINED &&
3113 prefetch_loc != prange->prefetch_loc)) {
3114 prefetch_loc = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
3115 get_prefetch_loc = false;
3116 } else {
3117 prefetch_loc = prange->prefetch_loc;
3118 }
3120 if (get_accessible) {
3121 bitmap_and(bitmap_access, bitmap_access,
3122 prange->bitmap_access, MAX_GPU_INSTANCE);
3123 bitmap_and(bitmap_aip, bitmap_aip,
3124 prange->bitmap_aip, MAX_GPU_INSTANCE);
3125 }
3126 if (get_flags)
3127 flags &= prange->flags;
3129 if (get_granularity && prange->granularity < granularity)
3130 granularity = prange->granularity;
3132 node = next;
3133 }
3134 fill_values:
3135 mutex_unlock(&svms->lock);
3137 for (i = 0; i < nattr; i++) {
3138 switch (attrs[i].type) {
3139 case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
3140 attrs[i].value = location;
3142 case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
3143 attrs[i].value = prefetch_loc;
3145 case KFD_IOCTL_SVM_ATTR_ACCESS:
3146 gpuidx = kfd_process_gpuidx_from_gpuid(p,
3149 pr_debug("invalid gpuid %x\n", attrs[i].value);
3152 if (test_bit(gpuidx, bitmap_access))
3153 attrs[i].type = KFD_IOCTL_SVM_ATTR_ACCESS;
3154 else if (test_bit(gpuidx, bitmap_aip))
3156 KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE;
3158 attrs[i].type = KFD_IOCTL_SVM_ATTR_NO_ACCESS;
3160 case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
3161 attrs[i].value = flags;
3163 case KFD_IOCTL_SVM_ATTR_GRANULARITY:
3164 attrs[i].value = (uint32_t)granularity;
3165 break;
3166 }
3167 }
3169 return 0;
3170 }
3172 int
3173 svm_ioctl(struct kfd_process *p, enum kfd_ioctl_svm_op op, uint64_t start,
3174 uint64_t size, uint32_t nattrs, struct kfd_ioctl_svm_attribute *attrs)
3178 start >>= PAGE_SHIFT;
3179 size >>= PAGE_SHIFT;
3181 switch (op) {
3182 case KFD_IOCTL_SVM_OP_SET_ATTR:
3183 r = svm_range_set_attr(p, start, size, nattrs, attrs);
3184 break;
3185 case KFD_IOCTL_SVM_OP_GET_ATTR:
3186 r = svm_range_get_attr(p, start, size, nattrs, attrs);