1 // SPDX-License-Identifier: MIT
3 * Copyright © 2021 Intel Corporation
8 #include <linux/dma-fence-array.h>
9 #include <linux/nospec.h>
11 #include <drm/drm_exec.h>
12 #include <drm/drm_print.h>
13 #include <drm/ttm/ttm_execbuf_util.h>
14 #include <drm/ttm/ttm_tt.h>
15 #include <drm/xe_drm.h>
16 #include <linux/ascii85.h>
17 #include <linux/delay.h>
18 #include <linux/kthread.h>
20 #include <linux/swap.h>
22 #include <generated/xe_wa_oob.h>
24 #include "regs/xe_gtt_defs.h"
25 #include "xe_assert.h"
27 #include "xe_device.h"
28 #include "xe_drm_client.h"
29 #include "xe_exec_queue.h"
30 #include "xe_gt_pagefault.h"
31 #include "xe_gt_tlb_invalidation.h"
32 #include "xe_migrate.h"
35 #include "xe_preempt_fence.h"
37 #include "xe_res_cursor.h"
43 static struct drm_gem_object *xe_vm_obj(struct xe_vm *vm)
45 return vm->gpuvm.r_obj;
49 * xe_vma_userptr_check_repin() - Advisory check for repin needed
50 * @uvma: The userptr vma
52 * Check if the userptr vma has been invalidated since last successful
53 * repin. The check is advisory only, and the function can be called
54 * without the vm->userptr.notifier_lock held. There is no guarantee that the
55 * vma userptr will remain valid after a lockless check, so typically
56 * the call needs to be followed by a proper check under the notifier_lock.
58 * Return: 0 if userptr vma is valid, -EAGAIN otherwise; repin recommended.
60 int xe_vma_userptr_check_repin(struct xe_userptr_vma *uvma)
62 return mmu_interval_check_retry(&uvma->userptr.notifier,
63 uvma->userptr.notifier_seq) ?
67 int xe_vma_userptr_pin_pages(struct xe_userptr_vma *uvma)
69 struct xe_vma *vma = &uvma->vma;
70 struct xe_vm *vm = xe_vma_vm(vma);
71 struct xe_device *xe = vm->xe;
73 lockdep_assert_held(&vm->lock);
74 xe_assert(xe, xe_vma_is_userptr(vma));
76 return xe_hmm_userptr_populate_range(uvma, false);
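/*
 * Illustrative sketch (hypothetical caller, not part of the driver): how the
 * advisory check, the pin and the locked re-check described above are
 * typically combined while holding vm->lock. Error handling is elided; see
 * xe_vm_userptr_pin() and __xe_vm_userptr_needs_repin() for the real flow.
 *
 *	if (xe_vma_userptr_check_repin(uvma)) {
 *		err = xe_vma_userptr_pin_pages(uvma);
 *		if (err)
 *			return err;
 *	}
 *
 *	down_read(&vm->userptr.notifier_lock);
 *	err = __xe_vm_userptr_needs_repin(vm);
 *	up_read(&vm->userptr.notifier_lock);
 */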
79 static bool preempt_fences_waiting(struct xe_vm *vm)
81 struct xe_exec_queue *q;
83 lockdep_assert_held(&vm->lock);
84 xe_vm_assert_held(vm);
86 list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
87 if (!q->compute.pfence ||
88 (q->compute.pfence && test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
89 &q->compute.pfence->flags))) {
97 static void free_preempt_fences(struct list_head *list)
99 struct list_head *link, *next;
101 list_for_each_safe(link, next, list)
102 xe_preempt_fence_free(to_preempt_fence_from_link(link));
105 static int alloc_preempt_fences(struct xe_vm *vm, struct list_head *list,
108 lockdep_assert_held(&vm->lock);
109 xe_vm_assert_held(vm);
111 if (*count >= vm->preempt.num_exec_queues)
114 for (; *count < vm->preempt.num_exec_queues; ++(*count)) {
115 struct xe_preempt_fence *pfence = xe_preempt_fence_alloc();
118 return PTR_ERR(pfence);
120 list_move_tail(xe_preempt_fence_link(pfence), list);
126 static int wait_for_existing_preempt_fences(struct xe_vm *vm)
128 struct xe_exec_queue *q;
130 xe_vm_assert_held(vm);
132 list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
133 if (q->compute.pfence) {
134 long timeout = dma_fence_wait(q->compute.pfence, false);
138 dma_fence_put(q->compute.pfence);
139 q->compute.pfence = NULL;
146 static bool xe_vm_is_idle(struct xe_vm *vm)
148 struct xe_exec_queue *q;
150 xe_vm_assert_held(vm);
151 list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
152 if (!xe_exec_queue_is_idle(q))
159 static void arm_preempt_fences(struct xe_vm *vm, struct list_head *list)
161 struct list_head *link;
162 struct xe_exec_queue *q;
164 list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
165 struct dma_fence *fence;
168 xe_assert(vm->xe, link != list);
170 fence = xe_preempt_fence_arm(to_preempt_fence_from_link(link),
171 q, q->compute.context,
173 dma_fence_put(q->compute.pfence);
174 q->compute.pfence = fence;
178 static int add_preempt_fences(struct xe_vm *vm, struct xe_bo *bo)
180 struct xe_exec_queue *q;
183 if (!vm->preempt.num_exec_queues)
186 err = xe_bo_lock(bo, true);
190 err = dma_resv_reserve_fences(bo->ttm.base.resv, vm->preempt.num_exec_queues);
194 list_for_each_entry(q, &vm->preempt.exec_queues, compute.link)
195 if (q->compute.pfence) {
196 dma_resv_add_fence(bo->ttm.base.resv,
198 DMA_RESV_USAGE_BOOKKEEP);
206 static void resume_and_reinstall_preempt_fences(struct xe_vm *vm,
207 struct drm_exec *exec)
209 struct xe_exec_queue *q;
211 lockdep_assert_held(&vm->lock);
212 xe_vm_assert_held(vm);
214 list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
217 drm_gpuvm_resv_add_fence(&vm->gpuvm, exec, q->compute.pfence,
218 DMA_RESV_USAGE_BOOKKEEP, DMA_RESV_USAGE_BOOKKEEP);
222 int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
224 struct drm_gpuvm_exec vm_exec = {
226 .flags = DRM_EXEC_INTERRUPTIBLE_WAIT,
229 struct drm_exec *exec = &vm_exec.exec;
230 struct dma_fence *pfence;
234 xe_assert(vm->xe, xe_vm_in_preempt_fence_mode(vm));
236 down_write(&vm->lock);
237 err = drm_gpuvm_exec_lock(&vm_exec);
241 pfence = xe_preempt_fence_create(q, q->compute.context,
248 list_add(&q->compute.link, &vm->preempt.exec_queues);
249 ++vm->preempt.num_exec_queues;
250 q->compute.pfence = pfence;
252 down_read(&vm->userptr.notifier_lock);
254 drm_gpuvm_resv_add_fence(&vm->gpuvm, exec, pfence,
255 DMA_RESV_USAGE_BOOKKEEP, DMA_RESV_USAGE_BOOKKEEP);
258 * Check to see if a preemption on the VM or a userptr invalidation is
259 * in flight; if so, trigger this preempt fence to sync state with the
260 * other preempt fences on the VM.
262 wait = __xe_vm_userptr_needs_repin(vm) || preempt_fences_waiting(vm);
264 dma_fence_enable_sw_signaling(pfence);
266 up_read(&vm->userptr.notifier_lock);
277 * xe_vm_remove_compute_exec_queue() - Remove compute exec queue from VM
281 void xe_vm_remove_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
283 if (!xe_vm_in_preempt_fence_mode(vm))
286 down_write(&vm->lock);
287 list_del(&q->compute.link);
288 --vm->preempt.num_exec_queues;
289 if (q->compute.pfence) {
290 dma_fence_enable_sw_signaling(q->compute.pfence);
291 dma_fence_put(q->compute.pfence);
292 q->compute.pfence = NULL;
298 * __xe_vm_userptr_needs_repin() - Check whether the VM does have userptrs
299 * that need repinning.
302 * This function checks whether the VM has userptrs that need repinning,
303 * and provides a release-type barrier on the userptr.notifier_lock after
306 * Return: 0 if there are no userptrs needing repinning, -EAGAIN if there are.
308 int __xe_vm_userptr_needs_repin(struct xe_vm *vm)
310 lockdep_assert_held_read(&vm->userptr.notifier_lock);
312 return (list_empty(&vm->userptr.repin_list) &&
313 list_empty(&vm->userptr.invalidated)) ? 0 : -EAGAIN;
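/*
 * Illustrative sketch (hypothetical caller, not part of the driver): the
 * -EAGAIN contract above is normally consumed by taking the notifier lock in
 * read mode, re-checking, and restarting the whole pin/validate sequence when
 * userptrs were invalidated in the meantime (compare the retry_required()
 * check in preempt_rebind_work_func() below).
 *
 *	down_read(&vm->userptr.notifier_lock);
 *	if (__xe_vm_userptr_needs_repin(vm)) {
 *		up_read(&vm->userptr.notifier_lock);
 *		goto retry;
 *	}
 *	... publish fences / install bindings ...
 *	up_read(&vm->userptr.notifier_lock);
 */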
316 #define XE_VM_REBIND_RETRY_TIMEOUT_MS 1000
318 static void xe_vm_kill(struct xe_vm *vm)
320 struct xe_exec_queue *q;
322 lockdep_assert_held(&vm->lock);
324 xe_vm_lock(vm, false);
325 vm->flags |= XE_VM_FLAG_BANNED;
326 trace_xe_vm_kill(vm);
328 list_for_each_entry(q, &vm->preempt.exec_queues, compute.link)
332 /* TODO: Inform user the VM is banned */
336 * xe_vm_validate_should_retry() - Whether to retry after a validate error.
337 * @exec: The drm_exec object used for locking before validation.
338 * @err: The error returned from ttm_bo_validate().
339 * @end: A ktime_t cookie that should be set to 0 before first use and
340 * that should be reused on subsequent calls.
342 * With multiple active VMs, under memory pressure, it is possible that
343 * ttm_bo_validate() runs into -EDEADLK and in such a case returns -ENOMEM.
344 * Until ttm properly handles locking in such scenarios, the best thing the
345 * driver can do is retry with a timeout. Check if that is necessary, and
346 * if so unlock the drm_exec's objects while keeping the ticket to prepare
349 * Return: true if a retry after drm_exec_init() is recommended;
352 bool xe_vm_validate_should_retry(struct drm_exec *exec, int err, ktime_t *end)
360 *end = *end ? : ktime_add_ms(cur, XE_VM_REBIND_RETRY_TIMEOUT_MS);
361 if (!ktime_before(cur, *end))
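/*
 * Illustrative caller pattern (a sketch mirroring preempt_rebind_work_func()
 * below; lock_and_validate() is a placeholder for the caller's locking and
 * validation step): the ktime cookie is zero-initialized once and reused
 * across retries, so the timeout covers the whole retry loop rather than a
 * single attempt.
 *
 *	ktime_t end = 0;
 *
 * retry:
 *	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
 *	drm_exec_until_all_locked(&exec) {
 *		err = lock_and_validate(&exec);
 *		drm_exec_retry_on_contention(&exec);
 *	}
 *	drm_exec_fini(&exec);
 *	if (err && xe_vm_validate_should_retry(&exec, err, &end))
 *		goto retry;
 */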
368 static int xe_gpuvm_validate(struct drm_gpuvm_bo *vm_bo, struct drm_exec *exec)
370 struct xe_vm *vm = gpuvm_to_vm(vm_bo->vm);
371 struct drm_gpuva *gpuva;
374 lockdep_assert_held(&vm->lock);
375 drm_gpuvm_bo_for_each_va(gpuva, vm_bo)
376 list_move_tail(&gpuva_to_vma(gpuva)->combined_links.rebind,
379 ret = xe_bo_validate(gem_to_xe_bo(vm_bo->obj), vm, false);
383 vm_bo->evicted = false;
388 * xe_vm_validate_rebind() - Validate buffer objects and rebind vmas
389 * @vm: The vm for which we are rebinding.
390 * @exec: The struct drm_exec with the locked GEM objects.
391 * @num_fences: The number of fences to reserve for the operation, not
392 * including rebinds and validations.
394 * Validates all evicted gem objects and rebinds their vmas. Note that
395 * rebindings may cause evictions and hence the validation-rebind
396 * sequence is rerun until there are no more objects to validate.
398 * Return: 0 on success, negative error code on error. In particular,
399 * may return -EINTR or -ERESTARTSYS if interrupted, and -EDEADLK if
400 * the drm_exec transaction needs to be restarted.
402 int xe_vm_validate_rebind(struct xe_vm *vm, struct drm_exec *exec,
403 unsigned int num_fences)
405 struct drm_gem_object *obj;
410 ret = drm_gpuvm_validate(&vm->gpuvm, exec);
414 ret = xe_vm_rebind(vm, false);
417 } while (!list_empty(&vm->gpuvm.evict.list));
419 drm_exec_for_each_locked_object(exec, index, obj) {
420 ret = dma_resv_reserve_fences(obj->resv, num_fences);
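/*
 * Illustrative sketch (hypothetical caller): xe_vm_validate_rebind() is meant
 * to run inside a drm_exec locking loop with the VM's resv and external
 * objects already prepared, restarting the transaction on -EDEADLK. Compare
 * xe_preempt_work_begin() below for one in-tree usage.
 *
 *	drm_exec_until_all_locked(&exec) {
 *		err = drm_gpuvm_prepare_vm(&vm->gpuvm, &exec, 0);
 *		if (!err)
 *			err = drm_gpuvm_prepare_objects(&vm->gpuvm, &exec, 0);
 *		if (!err)
 *			err = xe_vm_validate_rebind(vm, &exec, num_fences);
 *		drm_exec_retry_on_contention(&exec);
 *	}
 */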
428 static int xe_preempt_work_begin(struct drm_exec *exec, struct xe_vm *vm,
433 err = drm_gpuvm_prepare_vm(&vm->gpuvm, exec, 0);
437 if (xe_vm_is_idle(vm)) {
438 vm->preempt.rebind_deactivated = true;
443 if (!preempt_fences_waiting(vm)) {
448 err = drm_gpuvm_prepare_objects(&vm->gpuvm, exec, 0);
452 err = wait_for_existing_preempt_fences(vm);
457 * Add validation and rebinding to the locking loop since both can
458 * cause evictions which may require blocking dma_resv locks.
459 * The fence reservation here is intended for the new preempt fences
460 * we attach at the end of the rebind work.
462 return xe_vm_validate_rebind(vm, exec, vm->preempt.num_exec_queues);
465 static void preempt_rebind_work_func(struct work_struct *w)
467 struct xe_vm *vm = container_of(w, struct xe_vm, preempt.rebind_work);
468 struct drm_exec exec;
469 unsigned int fence_count = 0;
470 LIST_HEAD(preempt_fences);
474 int __maybe_unused tries = 0;
476 xe_assert(vm->xe, xe_vm_in_preempt_fence_mode(vm));
477 trace_xe_vm_rebind_worker_enter(vm);
479 down_write(&vm->lock);
481 if (xe_vm_is_closed_or_banned(vm)) {
483 trace_xe_vm_rebind_worker_exit(vm);
488 if (xe_vm_userptr_check_repin(vm)) {
489 err = xe_vm_userptr_pin(vm);
491 goto out_unlock_outer;
494 drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
496 drm_exec_until_all_locked(&exec) {
499 err = xe_preempt_work_begin(&exec, vm, &done);
500 drm_exec_retry_on_contention(&exec);
502 drm_exec_fini(&exec);
503 if (err && xe_vm_validate_should_retry(&exec, err, &end))
506 goto out_unlock_outer;
510 err = alloc_preempt_fences(vm, &preempt_fences, &fence_count);
514 err = xe_vm_rebind(vm, true);
518 /* Wait on rebinds and munmap style VM unbinds */
519 wait = dma_resv_wait_timeout(xe_vm_resv(vm),
520 DMA_RESV_USAGE_KERNEL,
521 false, MAX_SCHEDULE_TIMEOUT);
527 #define retry_required(__tries, __vm) \
528 (IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT) ? \
529 (!(__tries)++ || __xe_vm_userptr_needs_repin(__vm)) : \
530 __xe_vm_userptr_needs_repin(__vm))
532 down_read(&vm->userptr.notifier_lock);
533 if (retry_required(tries, vm)) {
534 up_read(&vm->userptr.notifier_lock);
539 #undef retry_required
541 spin_lock(&vm->xe->ttm.lru_lock);
542 ttm_lru_bulk_move_tail(&vm->lru_bulk_move);
543 spin_unlock(&vm->xe->ttm.lru_lock);
545 /* Point of no return. */
546 arm_preempt_fences(vm, &preempt_fences);
547 resume_and_reinstall_preempt_fences(vm, &exec);
548 up_read(&vm->userptr.notifier_lock);
551 drm_exec_fini(&exec);
553 if (err == -EAGAIN) {
554 trace_xe_vm_rebind_worker_retry(vm);
559 drm_warn(&vm->xe->drm, "VM worker error: %d\n", err);
564 free_preempt_fences(&preempt_fences);
566 trace_xe_vm_rebind_worker_exit(vm);
569 static bool vma_userptr_invalidate(struct mmu_interval_notifier *mni,
570 const struct mmu_notifier_range *range,
571 unsigned long cur_seq)
573 struct xe_userptr *userptr = container_of(mni, typeof(*userptr), notifier);
574 struct xe_userptr_vma *uvma = container_of(userptr, typeof(*uvma), userptr);
575 struct xe_vma *vma = &uvma->vma;
576 struct xe_vm *vm = xe_vma_vm(vma);
577 struct dma_resv_iter cursor;
578 struct dma_fence *fence;
581 xe_assert(vm->xe, xe_vma_is_userptr(vma));
582 trace_xe_vma_userptr_invalidate(vma);
584 if (!mmu_notifier_range_blockable(range))
587 vm_dbg(&xe_vma_vm(vma)->xe->drm,
588 "NOTIFIER: addr=0x%016llx, range=0x%016llx",
589 xe_vma_start(vma), xe_vma_size(vma));
591 down_write(&vm->userptr.notifier_lock);
592 mmu_interval_set_seq(mni, cur_seq);
594 /* No need to stop gpu access if the userptr is not yet bound. */
595 if (!userptr->initial_bind) {
596 up_write(&vm->userptr.notifier_lock);
601 * Tell exec and rebind worker they need to repin and rebind this
604 if (!xe_vm_in_fault_mode(vm) &&
605 !(vma->gpuva.flags & XE_VMA_DESTROYED) && vma->tile_present) {
606 spin_lock(&vm->userptr.invalidated_lock);
607 list_move_tail(&userptr->invalidate_link,
608 &vm->userptr.invalidated);
609 spin_unlock(&vm->userptr.invalidated_lock);
612 up_write(&vm->userptr.notifier_lock);
615 * Preempt fences turn into schedule disables, pipeline these.
616 * Note that even in fault mode, we need to wait for binds and
617 * unbinds to complete, and those are attached as BOOKKEEP fences
620 dma_resv_iter_begin(&cursor, xe_vm_resv(vm),
621 DMA_RESV_USAGE_BOOKKEEP);
622 dma_resv_for_each_fence_unlocked(&cursor, fence)
623 dma_fence_enable_sw_signaling(fence);
624 dma_resv_iter_end(&cursor);
626 err = dma_resv_wait_timeout(xe_vm_resv(vm),
627 DMA_RESV_USAGE_BOOKKEEP,
628 false, MAX_SCHEDULE_TIMEOUT);
629 XE_WARN_ON(err <= 0);
631 if (xe_vm_in_fault_mode(vm)) {
632 err = xe_vm_invalidate_vma(vma);
636 trace_xe_vma_userptr_invalidate_complete(vma);
641 static const struct mmu_interval_notifier_ops vma_userptr_notifier_ops = {
642 .invalidate = vma_userptr_invalidate,
645 int xe_vm_userptr_pin(struct xe_vm *vm)
647 struct xe_userptr_vma *uvma, *next;
649 LIST_HEAD(tmp_evict);
651 xe_assert(vm->xe, !xe_vm_in_fault_mode(vm));
652 lockdep_assert_held_write(&vm->lock);
654 /* Collect invalidated userptrs */
655 spin_lock(&vm->userptr.invalidated_lock);
656 list_for_each_entry_safe(uvma, next, &vm->userptr.invalidated,
657 userptr.invalidate_link) {
658 list_del_init(&uvma->userptr.invalidate_link);
659 list_move_tail(&uvma->userptr.repin_link,
660 &vm->userptr.repin_list);
662 spin_unlock(&vm->userptr.invalidated_lock);
664 /* Pin and move to temporary list */
665 list_for_each_entry_safe(uvma, next, &vm->userptr.repin_list,
666 userptr.repin_link) {
667 err = xe_vma_userptr_pin_pages(uvma);
668 if (err == -EFAULT) {
669 list_del_init(&uvma->userptr.repin_link);
671 /* Wait for pending binds */
672 xe_vm_lock(vm, false);
673 dma_resv_wait_timeout(xe_vm_resv(vm),
674 DMA_RESV_USAGE_BOOKKEEP,
675 false, MAX_SCHEDULE_TIMEOUT);
677 err = xe_vm_invalidate_vma(&uvma->vma);
685 list_del_init(&uvma->userptr.repin_link);
686 list_move_tail(&uvma->vma.combined_links.rebind,
695 * xe_vm_userptr_check_repin() - Check whether the VM might have userptrs
696 * that need repinning.
699 * This function does an advisory check for whether the VM has userptrs that
702 * Return: 0 if there are no indications of userptrs needing repinning,
703 * -EAGAIN if there are.
705 int xe_vm_userptr_check_repin(struct xe_vm *vm)
707 return (list_empty_careful(&vm->userptr.repin_list) &&
708 list_empty_careful(&vm->userptr.invalidated)) ? 0 : -EAGAIN;
711 static struct dma_fence *
712 xe_vm_bind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
713 struct xe_sync_entry *syncs, u32 num_syncs,
714 bool first_op, bool last_op);
716 int xe_vm_rebind(struct xe_vm *vm, bool rebind_worker)
718 struct dma_fence *fence;
719 struct xe_vma *vma, *next;
721 lockdep_assert_held(&vm->lock);
722 if (xe_vm_in_lr_mode(vm) && !rebind_worker)
725 xe_vm_assert_held(vm);
726 list_for_each_entry_safe(vma, next, &vm->rebind_list,
727 combined_links.rebind) {
728 xe_assert(vm->xe, vma->tile_present);
730 list_del_init(&vma->combined_links.rebind);
732 trace_xe_vma_rebind_worker(vma);
734 trace_xe_vma_rebind_exec(vma);
735 fence = xe_vm_bind_vma(vma, NULL, NULL, 0, false, false);
737 return PTR_ERR(fence);
738 dma_fence_put(fence);
744 static void xe_vma_free(struct xe_vma *vma)
746 if (xe_vma_is_userptr(vma))
747 kfree(to_userptr_vma(vma));
752 #define VMA_CREATE_FLAG_READ_ONLY BIT(0)
753 #define VMA_CREATE_FLAG_IS_NULL BIT(1)
754 #define VMA_CREATE_FLAG_DUMPABLE BIT(2)
756 static struct xe_vma *xe_vma_create(struct xe_vm *vm,
758 u64 bo_offset_or_userptr,
760 u16 pat_index, unsigned int flags)
763 struct xe_tile *tile;
765 bool read_only = (flags & VMA_CREATE_FLAG_READ_ONLY);
766 bool is_null = (flags & VMA_CREATE_FLAG_IS_NULL);
767 bool dumpable = (flags & VMA_CREATE_FLAG_DUMPABLE);
769 xe_assert(vm->xe, start < end);
770 xe_assert(vm->xe, end < vm->size);
773 * Allocate and ensure that the xe_vma_is_userptr() return
774 * matches what was allocated.
776 if (!bo && !is_null) {
777 struct xe_userptr_vma *uvma = kzalloc(sizeof(*uvma), GFP_KERNEL);
780 return ERR_PTR(-ENOMEM);
784 vma = kzalloc(sizeof(*vma), GFP_KERNEL);
786 return ERR_PTR(-ENOMEM);
789 vma->gpuva.flags |= DRM_GPUVA_SPARSE;
791 vma->gpuva.gem.obj = &bo->ttm.base;
794 INIT_LIST_HEAD(&vma->combined_links.rebind);
796 INIT_LIST_HEAD(&vma->gpuva.gem.entry);
797 vma->gpuva.vm = &vm->gpuvm;
798 vma->gpuva.va.addr = start;
799 vma->gpuva.va.range = end - start + 1;
801 vma->gpuva.flags |= XE_VMA_READ_ONLY;
803 vma->gpuva.flags |= XE_VMA_DUMPABLE;
805 for_each_tile(tile, vm->xe, id)
806 vma->tile_mask |= 0x1 << id;
808 if (GRAPHICS_VER(vm->xe) >= 20 || vm->xe->info.platform == XE_PVC)
809 vma->gpuva.flags |= XE_VMA_ATOMIC_PTE_BIT;
811 vma->pat_index = pat_index;
814 struct drm_gpuvm_bo *vm_bo;
816 xe_bo_assert_held(bo);
818 vm_bo = drm_gpuvm_bo_obtain(vma->gpuva.vm, &bo->ttm.base);
821 return ERR_CAST(vm_bo);
824 drm_gpuvm_bo_extobj_add(vm_bo);
825 drm_gem_object_get(&bo->ttm.base);
826 vma->gpuva.gem.offset = bo_offset_or_userptr;
827 drm_gpuva_link(&vma->gpuva, vm_bo);
828 drm_gpuvm_bo_put(vm_bo);
829 } else /* userptr or null */ {
831 struct xe_userptr *userptr = &to_userptr_vma(vma)->userptr;
832 u64 size = end - start + 1;
835 INIT_LIST_HEAD(&userptr->invalidate_link);
836 INIT_LIST_HEAD(&userptr->repin_link);
837 vma->gpuva.gem.offset = bo_offset_or_userptr;
839 err = mmu_interval_notifier_insert(&userptr->notifier,
841 xe_vma_userptr(vma), size,
842 &vma_userptr_notifier_ops);
848 userptr->notifier_seq = LONG_MAX;
857 static void xe_vma_destroy_late(struct xe_vma *vma)
859 struct xe_vm *vm = xe_vma_vm(vma);
862 xe_sync_ufence_put(vma->ufence);
866 if (xe_vma_is_userptr(vma)) {
867 struct xe_userptr_vma *uvma = to_userptr_vma(vma);
868 struct xe_userptr *userptr = &uvma->userptr;
871 xe_hmm_userptr_free_sg(uvma);
874 * Since userptr pages are not pinned, we can't remove
875 * the notifier until we're sure the GPU is not accessing
878 mmu_interval_notifier_remove(&userptr->notifier);
880 } else if (xe_vma_is_null(vma)) {
883 xe_bo_put(xe_vma_bo(vma));
889 static void vma_destroy_work_func(struct work_struct *w)
892 container_of(w, struct xe_vma, destroy_work);
894 xe_vma_destroy_late(vma);
897 static void vma_destroy_cb(struct dma_fence *fence,
898 struct dma_fence_cb *cb)
900 struct xe_vma *vma = container_of(cb, struct xe_vma, destroy_cb);
902 INIT_WORK(&vma->destroy_work, vma_destroy_work_func);
903 queue_work(system_unbound_wq, &vma->destroy_work);
906 static void xe_vma_destroy(struct xe_vma *vma, struct dma_fence *fence)
908 struct xe_vm *vm = xe_vma_vm(vma);
910 lockdep_assert_held_write(&vm->lock);
911 xe_assert(vm->xe, list_empty(&vma->combined_links.destroy));
913 if (xe_vma_is_userptr(vma)) {
914 xe_assert(vm->xe, vma->gpuva.flags & XE_VMA_DESTROYED);
916 spin_lock(&vm->userptr.invalidated_lock);
917 list_del(&to_userptr_vma(vma)->userptr.invalidate_link);
918 spin_unlock(&vm->userptr.invalidated_lock);
919 } else if (!xe_vma_is_null(vma)) {
920 xe_bo_assert_held(xe_vma_bo(vma));
922 drm_gpuva_unlink(&vma->gpuva);
925 xe_vm_assert_held(vm);
927 int ret = dma_fence_add_callback(fence, &vma->destroy_cb,
931 XE_WARN_ON(ret != -ENOENT);
932 xe_vma_destroy_late(vma);
935 xe_vma_destroy_late(vma);
940 * xe_vm_lock_vma() - drm_exec utility to lock a vma
941 * @exec: The drm_exec object we're currently locking for.
942 * @vma: The vma for which we want to lock the vm resv and any attached
945 * Return: 0 on success, negative error code on error. In particular
946 * may return -EDEADLK on WW transaction contention and -EINTR if
947 * an interruptible wait is terminated by a signal.
949 int xe_vm_lock_vma(struct drm_exec *exec, struct xe_vma *vma)
951 struct xe_vm *vm = xe_vma_vm(vma);
952 struct xe_bo *bo = xe_vma_bo(vma);
957 err = drm_exec_lock_obj(exec, xe_vm_obj(vm));
958 if (!err && bo && !bo->vm)
959 err = drm_exec_lock_obj(exec, &bo->ttm.base);
964 static void xe_vma_destroy_unlocked(struct xe_vma *vma)
966 struct drm_exec exec;
969 drm_exec_init(&exec, 0, 0);
970 drm_exec_until_all_locked(&exec) {
971 err = xe_vm_lock_vma(&exec, vma);
972 drm_exec_retry_on_contention(&exec);
977 xe_vma_destroy(vma, NULL);
979 drm_exec_fini(&exec);
983 xe_vm_find_overlapping_vma(struct xe_vm *vm, u64 start, u64 range)
985 struct drm_gpuva *gpuva;
987 lockdep_assert_held(&vm->lock);
989 if (xe_vm_is_closed_or_banned(vm))
992 xe_assert(vm->xe, start + range <= vm->size);
994 gpuva = drm_gpuva_find_first(&vm->gpuvm, start, range);
996 return gpuva ? gpuva_to_vma(gpuva) : NULL;
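/*
 * Illustrative sketch (hypothetical caller): looking up the vma that covers a
 * single faulting page, assuming a 4 KiB probe range and vm->lock held in at
 * least read mode.
 *
 *	down_read(&vm->lock);
 *	vma = xe_vm_find_overlapping_vma(vm, addr & ~(u64)(SZ_4K - 1), SZ_4K);
 *	if (!vma)
 *		... no mapping at this address ...
 *	up_read(&vm->lock);
 */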
999 static int xe_vm_insert_vma(struct xe_vm *vm, struct xe_vma *vma)
1003 xe_assert(vm->xe, xe_vma_vm(vma) == vm);
1004 lockdep_assert_held(&vm->lock);
1006 mutex_lock(&vm->snap_mutex);
1007 err = drm_gpuva_insert(&vm->gpuvm, &vma->gpuva);
1008 mutex_unlock(&vm->snap_mutex);
1009 XE_WARN_ON(err); /* Shouldn't be possible */
1014 static void xe_vm_remove_vma(struct xe_vm *vm, struct xe_vma *vma)
1016 xe_assert(vm->xe, xe_vma_vm(vma) == vm);
1017 lockdep_assert_held(&vm->lock);
1019 mutex_lock(&vm->snap_mutex);
1020 drm_gpuva_remove(&vma->gpuva);
1021 mutex_unlock(&vm->snap_mutex);
1022 if (vm->usm.last_fault_vma == vma)
1023 vm->usm.last_fault_vma = NULL;
1026 static struct drm_gpuva_op *xe_vm_op_alloc(void)
1028 struct xe_vma_op *op;
1030 op = kzalloc(sizeof(*op), GFP_KERNEL);
1038 static void xe_vm_free(struct drm_gpuvm *gpuvm);
1040 static const struct drm_gpuvm_ops gpuvm_ops = {
1041 .op_alloc = xe_vm_op_alloc,
1042 .vm_bo_validate = xe_gpuvm_validate,
1043 .vm_free = xe_vm_free,
1046 static u64 pde_encode_pat_index(struct xe_device *xe, u16 pat_index)
1050 if (pat_index & BIT(0))
1051 pte |= XE_PPGTT_PTE_PAT0;
1053 if (pat_index & BIT(1))
1054 pte |= XE_PPGTT_PTE_PAT1;
1059 static u64 pte_encode_pat_index(struct xe_device *xe, u16 pat_index,
1064 if (pat_index & BIT(0))
1065 pte |= XE_PPGTT_PTE_PAT0;
1067 if (pat_index & BIT(1))
1068 pte |= XE_PPGTT_PTE_PAT1;
1070 if (pat_index & BIT(2)) {
1072 pte |= XE_PPGTT_PDE_PDPE_PAT2;
1074 pte |= XE_PPGTT_PTE_PAT2;
1077 if (pat_index & BIT(3))
1078 pte |= XELPG_PPGTT_PTE_PAT3;
1080 if (pat_index & (BIT(4)))
1081 pte |= XE2_PPGTT_PTE_PAT4;
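/*
 * Worked example (a sketch based on the bit tests above): pat_index 3
 * (0b00011) encodes as XE_PPGTT_PTE_PAT0 | XE_PPGTT_PTE_PAT1, while
 * pat_index 5 (0b00101) sets XE_PPGTT_PTE_PAT0 plus the PAT2 bit, which is
 * encoded as XE_PPGTT_PTE_PAT2 for a leaf PTE (pt_level 0) and as
 * XE_PPGTT_PDE_PDPE_PAT2 for directory levels. Bits 3 and 4 map to the
 * XELPG_PPGTT_PTE_PAT3 and XE2_PPGTT_PTE_PAT4 definitions above.
 */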
1086 static u64 pte_encode_ps(u32 pt_level)
1088 XE_WARN_ON(pt_level > MAX_HUGEPTE_LEVEL);
1091 return XE_PDE_PS_2M;
1092 else if (pt_level == 2)
1093 return XE_PDPE_PS_1G;
1098 static u64 xelp_pde_encode_bo(struct xe_bo *bo, u64 bo_offset,
1099 const u16 pat_index)
1101 struct xe_device *xe = xe_bo_device(bo);
1104 pde = xe_bo_addr(bo, bo_offset, XE_PAGE_SIZE);
1105 pde |= XE_PAGE_PRESENT | XE_PAGE_RW;
1106 pde |= pde_encode_pat_index(xe, pat_index);
1111 static u64 xelp_pte_encode_bo(struct xe_bo *bo, u64 bo_offset,
1112 u16 pat_index, u32 pt_level)
1114 struct xe_device *xe = xe_bo_device(bo);
1117 pte = xe_bo_addr(bo, bo_offset, XE_PAGE_SIZE);
1118 pte |= XE_PAGE_PRESENT | XE_PAGE_RW;
1119 pte |= pte_encode_pat_index(xe, pat_index, pt_level);
1120 pte |= pte_encode_ps(pt_level);
1122 if (xe_bo_is_vram(bo) || xe_bo_is_stolen_devmem(bo))
1123 pte |= XE_PPGTT_PTE_DM;
1128 static u64 xelp_pte_encode_vma(u64 pte, struct xe_vma *vma,
1129 u16 pat_index, u32 pt_level)
1131 struct xe_device *xe = xe_vma_vm(vma)->xe;
1133 pte |= XE_PAGE_PRESENT;
1135 if (likely(!xe_vma_read_only(vma)))
1138 pte |= pte_encode_pat_index(xe, pat_index, pt_level);
1139 pte |= pte_encode_ps(pt_level);
1141 if (unlikely(xe_vma_is_null(vma)))
1147 static u64 xelp_pte_encode_addr(struct xe_device *xe, u64 addr,
1149 u32 pt_level, bool devmem, u64 flags)
1153 /* Avoid passing random bits directly as flags */
1154 xe_assert(xe, !(flags & ~XE_PTE_PS64));
1157 pte |= XE_PAGE_PRESENT | XE_PAGE_RW;
1158 pte |= pte_encode_pat_index(xe, pat_index, pt_level);
1159 pte |= pte_encode_ps(pt_level);
1162 pte |= XE_PPGTT_PTE_DM;
1169 static const struct xe_pt_ops xelp_pt_ops = {
1170 .pte_encode_bo = xelp_pte_encode_bo,
1171 .pte_encode_vma = xelp_pte_encode_vma,
1172 .pte_encode_addr = xelp_pte_encode_addr,
1173 .pde_encode_bo = xelp_pde_encode_bo,
1177 * xe_vm_create_scratch() - Setup a scratch memory pagetable tree for the
1178 * given tile and vm.
1180 * @tile: tile to set up for.
1181 * @vm: vm to set up for.
1183 * Sets up a pagetable tree with one page-table per level and a single
1184 * leaf PTE. All pagetable entries point to the single page-table or,
1185 * for MAX_HUGEPTE_LEVEL, a NULL huge PTE returning 0 on read and
1186 * writes become NOPs.
1188 * Return: 0 on success, negative error code on error.
1190 static int xe_vm_create_scratch(struct xe_device *xe, struct xe_tile *tile,
1196 for (i = MAX_HUGEPTE_LEVEL; i < vm->pt_root[id]->level; i++) {
1197 vm->scratch_pt[id][i] = xe_pt_create(vm, tile, i);
1198 if (IS_ERR(vm->scratch_pt[id][i]))
1199 return PTR_ERR(vm->scratch_pt[id][i]);
1201 xe_pt_populate_empty(tile, vm, vm->scratch_pt[id][i]);
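/*
 * Worked example (a sketch, assuming MAX_HUGEPTE_LEVEL == 2): with a
 * four-level layout where vm->pt_root[id]->level == 3, the loop above runs
 * for i == 2 only, so scratch_pt[id][2] is the single extra page-table
 * allocated per tile; a five-level layout (pt_root level 4) would also
 * allocate scratch_pt[id][3].
 */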
1207 static void xe_vm_free_scratch(struct xe_vm *vm)
1209 struct xe_tile *tile;
1212 if (!xe_vm_has_scratch(vm))
1215 for_each_tile(tile, vm->xe, id) {
1218 if (!vm->pt_root[id])
1221 for (i = MAX_HUGEPTE_LEVEL; i < vm->pt_root[id]->level; ++i)
1222 if (vm->scratch_pt[id][i])
1223 xe_pt_destroy(vm->scratch_pt[id][i], vm->flags, NULL);
1227 struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
1229 struct drm_gem_object *vm_resv_obj;
1231 int err, number_tiles = 0;
1232 struct xe_tile *tile;
1235 vm = kzalloc(sizeof(*vm), GFP_KERNEL);
1237 return ERR_PTR(-ENOMEM);
1241 vm->size = 1ull << xe->info.va_bits;
1245 init_rwsem(&vm->lock);
1246 mutex_init(&vm->snap_mutex);
1248 INIT_LIST_HEAD(&vm->rebind_list);
1250 INIT_LIST_HEAD(&vm->userptr.repin_list);
1251 INIT_LIST_HEAD(&vm->userptr.invalidated);
1252 init_rwsem(&vm->userptr.notifier_lock);
1253 spin_lock_init(&vm->userptr.invalidated_lock);
1255 INIT_LIST_HEAD(&vm->preempt.exec_queues);
1256 vm->preempt.min_run_period_ms = 10; /* FIXME: Wire up to uAPI */
1258 for_each_tile(tile, xe, id)
1259 xe_range_fence_tree_init(&vm->rftree[id]);
1261 vm->pt_ops = &xelp_pt_ops;
1263 if (!(flags & XE_VM_FLAG_MIGRATION))
1264 xe_pm_runtime_get_noresume(xe);
1266 vm_resv_obj = drm_gpuvm_resv_object_alloc(&xe->drm);
1272 drm_gpuvm_init(&vm->gpuvm, "Xe VM", DRM_GPUVM_RESV_PROTECTED, &xe->drm,
1273 vm_resv_obj, 0, vm->size, 0, 0, &gpuvm_ops);
1275 drm_gem_object_put(vm_resv_obj);
1277 err = dma_resv_lock_interruptible(xe_vm_resv(vm), NULL);
1281 if (IS_DGFX(xe) && xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K)
1282 vm->flags |= XE_VM_FLAG_64K;
1284 for_each_tile(tile, xe, id) {
1285 if (flags & XE_VM_FLAG_MIGRATION &&
1286 tile->id != XE_VM_FLAG_TILE_ID(flags))
1289 vm->pt_root[id] = xe_pt_create(vm, tile, xe->info.vm_max_level);
1290 if (IS_ERR(vm->pt_root[id])) {
1291 err = PTR_ERR(vm->pt_root[id]);
1292 vm->pt_root[id] = NULL;
1293 goto err_unlock_close;
1297 if (xe_vm_has_scratch(vm)) {
1298 for_each_tile(tile, xe, id) {
1299 if (!vm->pt_root[id])
1302 err = xe_vm_create_scratch(xe, tile, vm);
1304 goto err_unlock_close;
1306 vm->batch_invalidate_tlb = true;
1309 if (vm->flags & XE_VM_FLAG_LR_MODE) {
1310 INIT_WORK(&vm->preempt.rebind_work, preempt_rebind_work_func);
1311 vm->batch_invalidate_tlb = false;
1314 /* Fill pt_root after allocating scratch tables */
1315 for_each_tile(tile, xe, id) {
1316 if (!vm->pt_root[id])
1319 xe_pt_populate_empty(tile, vm, vm->pt_root[id]);
1321 dma_resv_unlock(xe_vm_resv(vm));
1323 /* Kernel migration VM shouldn't have a circular loop.. */
1324 if (!(flags & XE_VM_FLAG_MIGRATION)) {
1325 for_each_tile(tile, xe, id) {
1326 struct xe_gt *gt = tile->primary_gt;
1327 struct xe_vm *migrate_vm;
1328 struct xe_exec_queue *q;
1329 u32 create_flags = EXEC_QUEUE_FLAG_VM;
1331 if (!vm->pt_root[id])
1334 migrate_vm = xe_migrate_get_vm(tile->migrate);
1335 q = xe_exec_queue_create_class(xe, gt, migrate_vm,
1336 XE_ENGINE_CLASS_COPY,
1338 xe_vm_put(migrate_vm);
1348 if (number_tiles > 1)
1349 vm->composite_fence_ctx = dma_fence_context_alloc(1);
1351 mutex_lock(&xe->usm.lock);
1352 if (flags & XE_VM_FLAG_FAULT_MODE)
1353 xe->usm.num_vm_in_fault_mode++;
1354 else if (!(flags & XE_VM_FLAG_MIGRATION))
1355 xe->usm.num_vm_in_non_fault_mode++;
1356 mutex_unlock(&xe->usm.lock);
1358 trace_xe_vm_create(vm);
1363 dma_resv_unlock(xe_vm_resv(vm));
1365 xe_vm_close_and_put(vm);
1366 return ERR_PTR(err);
1369 mutex_destroy(&vm->snap_mutex);
1370 for_each_tile(tile, xe, id)
1371 xe_range_fence_tree_fini(&vm->rftree[id]);
1373 if (!(flags & XE_VM_FLAG_MIGRATION))
1374 xe_pm_runtime_put(xe);
1375 return ERR_PTR(err);
1378 static void xe_vm_close(struct xe_vm *vm)
1380 down_write(&vm->lock);
1382 up_write(&vm->lock);
1385 void xe_vm_close_and_put(struct xe_vm *vm)
1387 LIST_HEAD(contested);
1388 struct xe_device *xe = vm->xe;
1389 struct xe_tile *tile;
1390 struct xe_vma *vma, *next_vma;
1391 struct drm_gpuva *gpuva, *next;
1394 xe_assert(xe, !vm->preempt.num_exec_queues);
1397 if (xe_vm_in_preempt_fence_mode(vm))
1398 flush_work(&vm->preempt.rebind_work);
1400 down_write(&vm->lock);
1401 for_each_tile(tile, xe, id) {
1403 xe_exec_queue_last_fence_put(vm->q[id], vm);
1405 up_write(&vm->lock);
1407 for_each_tile(tile, xe, id) {
1409 xe_exec_queue_kill(vm->q[id]);
1410 xe_exec_queue_put(vm->q[id]);
1415 down_write(&vm->lock);
1416 xe_vm_lock(vm, false);
1417 drm_gpuvm_for_each_va_safe(gpuva, next, &vm->gpuvm) {
1418 vma = gpuva_to_vma(gpuva);
1420 if (xe_vma_has_no_bo(vma)) {
1421 down_read(&vm->userptr.notifier_lock);
1422 vma->gpuva.flags |= XE_VMA_DESTROYED;
1423 up_read(&vm->userptr.notifier_lock);
1426 xe_vm_remove_vma(vm, vma);
1428 /* easy case, remove from VMA? */
1429 if (xe_vma_has_no_bo(vma) || xe_vma_bo(vma)->vm) {
1430 list_del_init(&vma->combined_links.rebind);
1431 xe_vma_destroy(vma, NULL);
1435 list_move_tail(&vma->combined_links.destroy, &contested);
1436 vma->gpuva.flags |= XE_VMA_DESTROYED;
1440 * All vm operations will add shared fences to resv.
1441 * The only exception is eviction for a shared object,
1442 * but even so, the unbind when evicted would still
1443 * install a fence to resv. Hence it's safe to
1444 * destroy the pagetables immediately.
1446 xe_vm_free_scratch(vm);
1448 for_each_tile(tile, xe, id) {
1449 if (vm->pt_root[id]) {
1450 xe_pt_destroy(vm->pt_root[id], vm->flags, NULL);
1451 vm->pt_root[id] = NULL;
1457 * VM is now dead, cannot re-add nodes to vm->vmas if it's NULL
1458 * Since we hold a refcount to the bo, we can remove and free
1459 * the members safely without locking.
1461 list_for_each_entry_safe(vma, next_vma, &contested,
1462 combined_links.destroy) {
1463 list_del_init(&vma->combined_links.destroy);
1464 xe_vma_destroy_unlocked(vma);
1467 up_write(&vm->lock);
1469 mutex_lock(&xe->usm.lock);
1470 if (vm->flags & XE_VM_FLAG_FAULT_MODE)
1471 xe->usm.num_vm_in_fault_mode--;
1472 else if (!(vm->flags & XE_VM_FLAG_MIGRATION))
1473 xe->usm.num_vm_in_non_fault_mode--;
1478 xe_assert(xe, xe->info.has_asid);
1479 xe_assert(xe, !(vm->flags & XE_VM_FLAG_MIGRATION));
1481 lookup = xa_erase(&xe->usm.asid_to_vm, vm->usm.asid);
1482 xe_assert(xe, lookup == vm);
1484 mutex_unlock(&xe->usm.lock);
1486 for_each_tile(tile, xe, id)
1487 xe_range_fence_tree_fini(&vm->rftree[id]);
1492 static void xe_vm_free(struct drm_gpuvm *gpuvm)
1494 struct xe_vm *vm = container_of(gpuvm, struct xe_vm, gpuvm);
1495 struct xe_device *xe = vm->xe;
1496 struct xe_tile *tile;
1499 /* xe_vm_close_and_put was not called? */
1500 xe_assert(xe, !vm->size);
1502 if (xe_vm_in_preempt_fence_mode(vm))
1503 flush_work(&vm->preempt.rebind_work);
1505 mutex_destroy(&vm->snap_mutex);
1507 if (!(vm->flags & XE_VM_FLAG_MIGRATION))
1508 xe_pm_runtime_put(xe);
1510 for_each_tile(tile, xe, id)
1511 XE_WARN_ON(vm->pt_root[id]);
1513 trace_xe_vm_free(vm);
1517 struct xe_vm *xe_vm_lookup(struct xe_file *xef, u32 id)
1521 mutex_lock(&xef->vm.lock);
1522 vm = xa_load(&xef->vm.xa, id);
1525 mutex_unlock(&xef->vm.lock);
1530 u64 xe_vm_pdp4_descriptor(struct xe_vm *vm, struct xe_tile *tile)
1532 return vm->pt_ops->pde_encode_bo(vm->pt_root[tile->id]->bo, 0,
1533 tile_to_xe(tile)->pat.idx[XE_CACHE_WB]);
1536 static struct xe_exec_queue *
1537 to_wait_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
1539 return q ? q : vm->q[0];
1542 static struct dma_fence *
1543 xe_vm_unbind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
1544 struct xe_sync_entry *syncs, u32 num_syncs,
1545 bool first_op, bool last_op)
1547 struct xe_vm *vm = xe_vma_vm(vma);
1548 struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q);
1549 struct xe_tile *tile;
1550 struct dma_fence *fence = NULL;
1551 struct dma_fence **fences = NULL;
1552 struct dma_fence_array *cf = NULL;
1553 int cur_fence = 0, i;
1554 int number_tiles = hweight8(vma->tile_present);
1558 trace_xe_vma_unbind(vma);
1561 struct xe_user_fence * const f = vma->ufence;
1563 if (!xe_sync_ufence_get_status(f))
1564 return ERR_PTR(-EBUSY);
1567 xe_sync_ufence_put(f);
1570 if (number_tiles > 1) {
1571 fences = kmalloc_array(number_tiles, sizeof(*fences),
1574 return ERR_PTR(-ENOMEM);
1577 for_each_tile(tile, vm->xe, id) {
1578 if (!(vma->tile_present & BIT(id)))
1581 fence = __xe_pt_unbind_vma(tile, vma, q ? q : vm->q[id],
1582 first_op ? syncs : NULL,
1583 first_op ? num_syncs : 0);
1584 if (IS_ERR(fence)) {
1585 err = PTR_ERR(fence);
1590 fences[cur_fence++] = fence;
1593 if (q && vm->pt_root[id] && !list_empty(&q->multi_gt_list))
1594 q = list_next_entry(q, multi_gt_list);
1598 cf = dma_fence_array_create(number_tiles, fences,
1599 vm->composite_fence_ctx,
1600 vm->composite_fence_seqno++,
1603 --vm->composite_fence_seqno;
1609 fence = cf ? &cf->base : !fence ?
1610 xe_exec_queue_last_fence_get(wait_exec_queue, vm) : fence;
1612 for (i = 0; i < num_syncs; i++)
1613 xe_sync_entry_signal(&syncs[i], fence);
1621 dma_fence_put(fences[--cur_fence]);
1625 return ERR_PTR(err);
1628 static struct dma_fence *
1629 xe_vm_bind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
1630 struct xe_sync_entry *syncs, u32 num_syncs,
1631 bool first_op, bool last_op)
1633 struct xe_tile *tile;
1634 struct dma_fence *fence;
1635 struct dma_fence **fences = NULL;
1636 struct dma_fence_array *cf = NULL;
1637 struct xe_vm *vm = xe_vma_vm(vma);
1638 int cur_fence = 0, i;
1639 int number_tiles = hweight8(vma->tile_mask);
1643 trace_xe_vma_bind(vma);
1645 if (number_tiles > 1) {
1646 fences = kmalloc_array(number_tiles, sizeof(*fences),
1649 return ERR_PTR(-ENOMEM);
1652 for_each_tile(tile, vm->xe, id) {
1653 if (!(vma->tile_mask & BIT(id)))
1656 fence = __xe_pt_bind_vma(tile, vma, q ? q : vm->q[id],
1657 first_op ? syncs : NULL,
1658 first_op ? num_syncs : 0,
1659 vma->tile_present & BIT(id));
1660 if (IS_ERR(fence)) {
1661 err = PTR_ERR(fence);
1666 fences[cur_fence++] = fence;
1669 if (q && vm->pt_root[id] && !list_empty(&q->multi_gt_list))
1670 q = list_next_entry(q, multi_gt_list);
1674 cf = dma_fence_array_create(number_tiles, fences,
1675 vm->composite_fence_ctx,
1676 vm->composite_fence_seqno++,
1679 --vm->composite_fence_seqno;
1686 for (i = 0; i < num_syncs; i++)
1687 xe_sync_entry_signal(&syncs[i],
1688 cf ? &cf->base : fence);
1691 return cf ? &cf->base : fence;
1696 dma_fence_put(fences[--cur_fence]);
1700 return ERR_PTR(err);
1703 static struct xe_user_fence *
1704 find_ufence_get(struct xe_sync_entry *syncs, u32 num_syncs)
1708 for (i = 0; i < num_syncs; i++) {
1709 struct xe_sync_entry *e = &syncs[i];
1711 if (xe_sync_is_ufence(e))
1712 return xe_sync_ufence_get(e);
1718 static int __xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma,
1719 struct xe_exec_queue *q, struct xe_sync_entry *syncs,
1720 u32 num_syncs, bool immediate, bool first_op,
1723 struct dma_fence *fence;
1724 struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q);
1725 struct xe_user_fence *ufence;
1727 xe_vm_assert_held(vm);
1729 ufence = find_ufence_get(syncs, num_syncs);
1730 if (vma->ufence && ufence)
1731 xe_sync_ufence_put(vma->ufence);
1733 vma->ufence = ufence ?: vma->ufence;
1736 fence = xe_vm_bind_vma(vma, q, syncs, num_syncs, first_op,
1739 return PTR_ERR(fence);
1743 xe_assert(vm->xe, xe_vm_in_fault_mode(vm));
1745 fence = xe_exec_queue_last_fence_get(wait_exec_queue, vm);
1747 for (i = 0; i < num_syncs; i++)
1748 xe_sync_entry_signal(&syncs[i], fence);
1753 xe_exec_queue_last_fence_set(wait_exec_queue, vm, fence);
1754 dma_fence_put(fence);
1759 static int xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma, struct xe_exec_queue *q,
1760 struct xe_bo *bo, struct xe_sync_entry *syncs,
1761 u32 num_syncs, bool immediate, bool first_op,
1766 xe_vm_assert_held(vm);
1767 xe_bo_assert_held(bo);
1769 if (bo && immediate) {
1770 err = xe_bo_validate(bo, vm, true);
1775 return __xe_vm_bind(vm, vma, q, syncs, num_syncs, immediate, first_op,
1779 static int xe_vm_unbind(struct xe_vm *vm, struct xe_vma *vma,
1780 struct xe_exec_queue *q, struct xe_sync_entry *syncs,
1781 u32 num_syncs, bool first_op, bool last_op)
1783 struct dma_fence *fence;
1784 struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q);
1786 xe_vm_assert_held(vm);
1787 xe_bo_assert_held(xe_vma_bo(vma));
1789 fence = xe_vm_unbind_vma(vma, q, syncs, num_syncs, first_op, last_op);
1791 return PTR_ERR(fence);
1793 xe_vma_destroy(vma, fence);
1795 xe_exec_queue_last_fence_set(wait_exec_queue, vm, fence);
1796 dma_fence_put(fence);
1801 #define ALL_DRM_XE_VM_CREATE_FLAGS (DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE | \
1802 DRM_XE_VM_CREATE_FLAG_LR_MODE | \
1803 DRM_XE_VM_CREATE_FLAG_FAULT_MODE)
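/*
 * Illustrative userspace sketch (not part of the driver; fd setup and error
 * handling are assumed): creating a long-running VM with scratch pages
 * through the ioctl handled below.
 *
 *	struct drm_xe_vm_create create = {
 *		.flags = DRM_XE_VM_CREATE_FLAG_LR_MODE |
 *			 DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE,
 *	};
 *
 *	if (!ioctl(fd, DRM_IOCTL_XE_VM_CREATE, &create))
 *		vm_id = create.vm_id;
 */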
1805 int xe_vm_create_ioctl(struct drm_device *dev, void *data,
1806 struct drm_file *file)
1808 struct xe_device *xe = to_xe_device(dev);
1809 struct xe_file *xef = to_xe_file(file);
1810 struct drm_xe_vm_create *args = data;
1811 struct xe_tile *tile;
1817 if (XE_IOCTL_DBG(xe, args->extensions))
1820 if (XE_WA(xe_root_mmio_gt(xe), 14016763929))
1821 args->flags |= DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE;
1823 if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE &&
1827 if (XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
1830 if (XE_IOCTL_DBG(xe, args->flags & ~ALL_DRM_XE_VM_CREATE_FLAGS))
1833 if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE &&
1834 args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE))
1837 if (XE_IOCTL_DBG(xe, !(args->flags & DRM_XE_VM_CREATE_FLAG_LR_MODE) &&
1838 args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE))
1841 if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE &&
1842 xe_device_in_non_fault_mode(xe)))
1845 if (XE_IOCTL_DBG(xe, !(args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE) &&
1846 xe_device_in_fault_mode(xe)))
1849 if (XE_IOCTL_DBG(xe, args->extensions))
1852 if (args->flags & DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE)
1853 flags |= XE_VM_FLAG_SCRATCH_PAGE;
1854 if (args->flags & DRM_XE_VM_CREATE_FLAG_LR_MODE)
1855 flags |= XE_VM_FLAG_LR_MODE;
1856 if (args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE)
1857 flags |= XE_VM_FLAG_FAULT_MODE;
1859 vm = xe_vm_create(xe, flags);
1863 mutex_lock(&xef->vm.lock);
1864 err = xa_alloc(&xef->vm.xa, &id, vm, xa_limit_32b, GFP_KERNEL);
1865 mutex_unlock(&xef->vm.lock);
1867 goto err_close_and_put;
1869 if (xe->info.has_asid) {
1870 mutex_lock(&xe->usm.lock);
1871 err = xa_alloc_cyclic(&xe->usm.asid_to_vm, &asid, vm,
1872 XA_LIMIT(1, XE_MAX_ASID - 1),
1873 &xe->usm.next_asid, GFP_KERNEL);
1874 mutex_unlock(&xe->usm.lock);
1878 vm->usm.asid = asid;
1884 /* Record BO memory for VM pagetable created against client */
1885 for_each_tile(tile, xe, id)
1886 if (vm->pt_root[id])
1887 xe_drm_client_add_bo(vm->xef->client, vm->pt_root[id]->bo);
1889 #if IS_ENABLED(CONFIG_DRM_XE_DEBUG_MEM)
1890 /* Warning: Security issue - never enable by default */
1891 args->reserved[0] = xe_bo_main_addr(vm->pt_root[0]->bo, XE_PAGE_SIZE);
1897 mutex_lock(&xef->vm.lock);
1898 xa_erase(&xef->vm.xa, id);
1899 mutex_unlock(&xef->vm.lock);
1901 xe_vm_close_and_put(vm);
1906 int xe_vm_destroy_ioctl(struct drm_device *dev, void *data,
1907 struct drm_file *file)
1909 struct xe_device *xe = to_xe_device(dev);
1910 struct xe_file *xef = to_xe_file(file);
1911 struct drm_xe_vm_destroy *args = data;
1915 if (XE_IOCTL_DBG(xe, args->pad) ||
1916 XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
1919 mutex_lock(&xef->vm.lock);
1920 vm = xa_load(&xef->vm.xa, args->vm_id);
1921 if (XE_IOCTL_DBG(xe, !vm))
1923 else if (XE_IOCTL_DBG(xe, vm->preempt.num_exec_queues))
1926 xa_erase(&xef->vm.xa, args->vm_id);
1927 mutex_unlock(&xef->vm.lock);
1930 xe_vm_close_and_put(vm);
1935 static const u32 region_to_mem_type[] = {
1941 static int xe_vm_prefetch(struct xe_vm *vm, struct xe_vma *vma,
1942 struct xe_exec_queue *q, u32 region,
1943 struct xe_sync_entry *syncs, u32 num_syncs,
1944 bool first_op, bool last_op)
1946 struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q);
1949 xe_assert(vm->xe, region < ARRAY_SIZE(region_to_mem_type));
1951 if (!xe_vma_has_no_bo(vma)) {
1952 err = xe_bo_migrate(xe_vma_bo(vma), region_to_mem_type[region]);
1957 if (vma->tile_mask != (vma->tile_present & ~vma->tile_invalidated)) {
1958 return xe_vm_bind(vm, vma, q, xe_vma_bo(vma), syncs, num_syncs,
1959 true, first_op, last_op);
1963 /* Nothing to do, signal fences now */
1965 for (i = 0; i < num_syncs; i++) {
1966 struct dma_fence *fence =
1967 xe_exec_queue_last_fence_get(wait_exec_queue, vm);
1969 xe_sync_entry_signal(&syncs[i], fence);
1970 dma_fence_put(fence);
1978 static void prep_vma_destroy(struct xe_vm *vm, struct xe_vma *vma,
1981 down_read(&vm->userptr.notifier_lock);
1982 vma->gpuva.flags |= XE_VMA_DESTROYED;
1983 up_read(&vm->userptr.notifier_lock);
1985 xe_vm_remove_vma(vm, vma);
1989 #define ULL unsigned long long
1991 #if IS_ENABLED(CONFIG_DRM_XE_DEBUG_VM)
1992 static void print_op(struct xe_device *xe, struct drm_gpuva_op *op)
1997 case DRM_GPUVA_OP_MAP:
1998 vm_dbg(&xe->drm, "MAP: addr=0x%016llx, range=0x%016llx",
1999 (ULL)op->map.va.addr, (ULL)op->map.va.range);
2001 case DRM_GPUVA_OP_REMAP:
2002 vma = gpuva_to_vma(op->remap.unmap->va);
2003 vm_dbg(&xe->drm, "REMAP:UNMAP: addr=0x%016llx, range=0x%016llx, keep=%d",
2004 (ULL)xe_vma_start(vma), (ULL)xe_vma_size(vma),
2005 op->remap.unmap->keep ? 1 : 0);
2008 "REMAP:PREV: addr=0x%016llx, range=0x%016llx",
2009 (ULL)op->remap.prev->va.addr,
2010 (ULL)op->remap.prev->va.range);
2013 "REMAP:NEXT: addr=0x%016llx, range=0x%016llx",
2014 (ULL)op->remap.next->va.addr,
2015 (ULL)op->remap.next->va.range);
2017 case DRM_GPUVA_OP_UNMAP:
2018 vma = gpuva_to_vma(op->unmap.va);
2019 vm_dbg(&xe->drm, "UNMAP: addr=0x%016llx, range=0x%016llx, keep=%d",
2020 (ULL)xe_vma_start(vma), (ULL)xe_vma_size(vma),
2021 op->unmap.keep ? 1 : 0);
2023 case DRM_GPUVA_OP_PREFETCH:
2024 vma = gpuva_to_vma(op->prefetch.va);
2025 vm_dbg(&xe->drm, "PREFETCH: addr=0x%016llx, range=0x%016llx",
2026 (ULL)xe_vma_start(vma), (ULL)xe_vma_size(vma));
2029 drm_warn(&xe->drm, "NOT POSSIBLE");
2033 static void print_op(struct xe_device *xe, struct drm_gpuva_op *op)
2039 * Create the operations list from IOCTL arguments and set up operation fields so
2040 * the parse and commit steps are decoupled from the IOCTL arguments. This step can fail.
2042 static struct drm_gpuva_ops *
2043 vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
2044 u64 bo_offset_or_userptr, u64 addr, u64 range,
2045 u32 operation, u32 flags,
2046 u32 prefetch_region, u16 pat_index)
2048 struct drm_gem_object *obj = bo ? &bo->ttm.base : NULL;
2049 struct drm_gpuva_ops *ops;
2050 struct drm_gpuva_op *__op;
2051 struct drm_gpuvm_bo *vm_bo;
2054 lockdep_assert_held_write(&vm->lock);
2056 vm_dbg(&vm->xe->drm,
2057 "op=%d, addr=0x%016llx, range=0x%016llx, bo_offset_or_userptr=0x%016llx",
2058 operation, (ULL)addr, (ULL)range,
2059 (ULL)bo_offset_or_userptr);
2061 switch (operation) {
2062 case DRM_XE_VM_BIND_OP_MAP:
2063 case DRM_XE_VM_BIND_OP_MAP_USERPTR:
2064 ops = drm_gpuvm_sm_map_ops_create(&vm->gpuvm, addr, range,
2065 obj, bo_offset_or_userptr);
2067 case DRM_XE_VM_BIND_OP_UNMAP:
2068 ops = drm_gpuvm_sm_unmap_ops_create(&vm->gpuvm, addr, range);
2070 case DRM_XE_VM_BIND_OP_PREFETCH:
2071 ops = drm_gpuvm_prefetch_ops_create(&vm->gpuvm, addr, range);
2073 case DRM_XE_VM_BIND_OP_UNMAP_ALL:
2074 xe_assert(vm->xe, bo);
2076 err = xe_bo_lock(bo, true);
2078 return ERR_PTR(err);
2080 vm_bo = drm_gpuvm_bo_obtain(&vm->gpuvm, obj);
2081 if (IS_ERR(vm_bo)) {
2083 return ERR_CAST(vm_bo);
2086 ops = drm_gpuvm_bo_unmap_ops_create(vm_bo);
2087 drm_gpuvm_bo_put(vm_bo);
2091 drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2092 ops = ERR_PTR(-EINVAL);
2097 drm_gpuva_for_each_op(__op, ops) {
2098 struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
2100 if (__op->op == DRM_GPUVA_OP_MAP) {
2102 flags & DRM_XE_VM_BIND_FLAG_IMMEDIATE;
2104 flags & DRM_XE_VM_BIND_FLAG_READONLY;
2105 op->map.is_null = flags & DRM_XE_VM_BIND_FLAG_NULL;
2106 op->map.dumpable = flags & DRM_XE_VM_BIND_FLAG_DUMPABLE;
2107 op->map.pat_index = pat_index;
2108 } else if (__op->op == DRM_GPUVA_OP_PREFETCH) {
2109 op->prefetch.region = prefetch_region;
2112 print_op(vm->xe, __op);
2118 static struct xe_vma *new_vma(struct xe_vm *vm, struct drm_gpuva_op_map *op,
2119 u16 pat_index, unsigned int flags)
2121 struct xe_bo *bo = op->gem.obj ? gem_to_xe_bo(op->gem.obj) : NULL;
2122 struct drm_exec exec;
2126 lockdep_assert_held_write(&vm->lock);
2129 drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
2130 drm_exec_until_all_locked(&exec) {
2133 err = drm_exec_lock_obj(&exec, xe_vm_obj(vm));
2134 drm_exec_retry_on_contention(&exec);
2137 err = drm_exec_lock_obj(&exec, &bo->ttm.base);
2138 drm_exec_retry_on_contention(&exec);
2141 drm_exec_fini(&exec);
2142 return ERR_PTR(err);
2146 vma = xe_vma_create(vm, bo, op->gem.offset,
2147 op->va.addr, op->va.addr +
2148 op->va.range - 1, pat_index, flags);
2150 drm_exec_fini(&exec);
2152 if (xe_vma_is_userptr(vma)) {
2153 err = xe_vma_userptr_pin_pages(to_userptr_vma(vma));
2155 prep_vma_destroy(vm, vma, false);
2156 xe_vma_destroy_unlocked(vma);
2157 return ERR_PTR(err);
2159 } else if (!xe_vma_has_no_bo(vma) && !bo->vm) {
2160 err = add_preempt_fences(vm, bo);
2162 prep_vma_destroy(vm, vma, false);
2163 xe_vma_destroy_unlocked(vma);
2164 return ERR_PTR(err);
2171 static u64 xe_vma_max_pte_size(struct xe_vma *vma)
2173 if (vma->gpuva.flags & XE_VMA_PTE_1G)
2175 else if (vma->gpuva.flags & (XE_VMA_PTE_2M | XE_VMA_PTE_COMPACT))
2177 else if (vma->gpuva.flags & XE_VMA_PTE_64K)
2179 else if (vma->gpuva.flags & XE_VMA_PTE_4K)
2182 return SZ_1G; /* Uninitialized, use max size */
2185 static void xe_vma_set_pte_size(struct xe_vma *vma, u64 size)
2189 vma->gpuva.flags |= XE_VMA_PTE_1G;
2192 vma->gpuva.flags |= XE_VMA_PTE_2M;
2195 vma->gpuva.flags |= XE_VMA_PTE_64K;
2198 vma->gpuva.flags |= XE_VMA_PTE_4K;
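/*
 * Worked example (a sketch based on the two helpers above): a vma last bound
 * with 2 MiB entries carries XE_VMA_PTE_2M, so xe_vma_max_pte_size() returns
 * SZ_2M, and a REMAP split only skips the rebind when the surviving piece
 * stays 2 MiB aligned (see the IS_ALIGNED() checks against
 * xe_vma_max_pte_size() in vm_bind_ioctl_ops_parse() below).
 */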
2203 static int xe_vma_op_commit(struct xe_vm *vm, struct xe_vma_op *op)
2207 lockdep_assert_held_write(&vm->lock);
2209 switch (op->base.op) {
2210 case DRM_GPUVA_OP_MAP:
2211 err |= xe_vm_insert_vma(vm, op->map.vma);
2213 op->flags |= XE_VMA_OP_COMMITTED;
2215 case DRM_GPUVA_OP_REMAP:
2218 gpuva_to_vma(op->base.remap.unmap->va)->tile_present;
2220 prep_vma_destroy(vm, gpuva_to_vma(op->base.remap.unmap->va),
2222 op->flags |= XE_VMA_OP_COMMITTED;
2224 if (op->remap.prev) {
2225 err |= xe_vm_insert_vma(vm, op->remap.prev);
2227 op->flags |= XE_VMA_OP_PREV_COMMITTED;
2228 if (!err && op->remap.skip_prev) {
2229 op->remap.prev->tile_present =
2231 op->remap.prev = NULL;
2234 if (op->remap.next) {
2235 err |= xe_vm_insert_vma(vm, op->remap.next);
2237 op->flags |= XE_VMA_OP_NEXT_COMMITTED;
2238 if (!err && op->remap.skip_next) {
2239 op->remap.next->tile_present =
2241 op->remap.next = NULL;
2245 /* Adjust for partial unbind after removing VMA from VM */
2247 op->base.remap.unmap->va->va.addr = op->remap.start;
2248 op->base.remap.unmap->va->va.range = op->remap.range;
2252 case DRM_GPUVA_OP_UNMAP:
2253 prep_vma_destroy(vm, gpuva_to_vma(op->base.unmap.va), true);
2254 op->flags |= XE_VMA_OP_COMMITTED;
2256 case DRM_GPUVA_OP_PREFETCH:
2257 op->flags |= XE_VMA_OP_COMMITTED;
2260 drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2267 static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
2268 struct drm_gpuva_ops *ops,
2269 struct xe_sync_entry *syncs, u32 num_syncs,
2270 struct list_head *ops_list, bool last)
2272 struct xe_device *xe = vm->xe;
2273 struct xe_vma_op *last_op = NULL;
2274 struct drm_gpuva_op *__op;
2277 lockdep_assert_held_write(&vm->lock);
2279 drm_gpuva_for_each_op(__op, ops) {
2280 struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
2282 bool first = list_empty(ops_list);
2283 unsigned int flags = 0;
2285 INIT_LIST_HEAD(&op->link);
2286 list_add_tail(&op->link, ops_list);
2289 op->flags |= XE_VMA_OP_FIRST;
2290 op->num_syncs = num_syncs;
2296 switch (op->base.op) {
2297 case DRM_GPUVA_OP_MAP:
2299 flags |= op->map.read_only ?
2300 VMA_CREATE_FLAG_READ_ONLY : 0;
2301 flags |= op->map.is_null ?
2302 VMA_CREATE_FLAG_IS_NULL : 0;
2303 flags |= op->map.dumpable ?
2304 VMA_CREATE_FLAG_DUMPABLE : 0;
2306 vma = new_vma(vm, &op->base.map, op->map.pat_index,
2309 return PTR_ERR(vma);
2314 case DRM_GPUVA_OP_REMAP:
2316 struct xe_vma *old =
2317 gpuva_to_vma(op->base.remap.unmap->va);
2319 op->remap.start = xe_vma_start(old);
2320 op->remap.range = xe_vma_size(old);
2322 if (op->base.remap.prev) {
2323 flags |= op->base.remap.unmap->va->flags &
2325 VMA_CREATE_FLAG_READ_ONLY : 0;
2326 flags |= op->base.remap.unmap->va->flags &
2328 VMA_CREATE_FLAG_IS_NULL : 0;
2329 flags |= op->base.remap.unmap->va->flags &
2331 VMA_CREATE_FLAG_DUMPABLE : 0;
2333 vma = new_vma(vm, op->base.remap.prev,
2334 old->pat_index, flags);
2336 return PTR_ERR(vma);
2338 op->remap.prev = vma;
2341 * Userptr creates a new SG mapping so
2342 * we must also rebind.
2344 op->remap.skip_prev = !xe_vma_is_userptr(old) &&
2345 IS_ALIGNED(xe_vma_end(vma),
2346 xe_vma_max_pte_size(old));
2347 if (op->remap.skip_prev) {
2348 xe_vma_set_pte_size(vma, xe_vma_max_pte_size(old));
2352 op->remap.start = xe_vma_end(vma);
2353 vm_dbg(&xe->drm, "REMAP:SKIP_PREV: addr=0x%016llx, range=0x%016llx",
2354 (ULL)op->remap.start,
2355 (ULL)op->remap.range);
2359 if (op->base.remap.next) {
2360 flags |= op->base.remap.unmap->va->flags &
2362 VMA_CREATE_FLAG_READ_ONLY : 0;
2363 flags |= op->base.remap.unmap->va->flags &
2365 VMA_CREATE_FLAG_IS_NULL : 0;
2366 flags |= op->base.remap.unmap->va->flags &
2368 VMA_CREATE_FLAG_DUMPABLE : 0;
2370 vma = new_vma(vm, op->base.remap.next,
2371 old->pat_index, flags);
2373 return PTR_ERR(vma);
2375 op->remap.next = vma;
2378 * Userptr creates a new SG mapping so
2379 * we must also rebind.
2381 op->remap.skip_next = !xe_vma_is_userptr(old) &&
2382 IS_ALIGNED(xe_vma_start(vma),
2383 xe_vma_max_pte_size(old));
2384 if (op->remap.skip_next) {
2385 xe_vma_set_pte_size(vma, xe_vma_max_pte_size(old));
2389 vm_dbg(&xe->drm, "REMAP:SKIP_NEXT: addr=0x%016llx, range=0x%016llx",
2390 (ULL)op->remap.start,
2391 (ULL)op->remap.range);
2396 case DRM_GPUVA_OP_UNMAP:
2397 case DRM_GPUVA_OP_PREFETCH:
2401 drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2406 err = xe_vma_op_commit(vm, op);
2411 /* FIXME: Unhandled corner case */
2412 XE_WARN_ON(!last_op && last && !list_empty(ops_list));
2419 last_op->flags |= XE_VMA_OP_LAST;
2420 last_op->num_syncs = num_syncs;
2421 last_op->syncs = syncs;
2427 static int op_execute(struct drm_exec *exec, struct xe_vm *vm,
2428 struct xe_vma *vma, struct xe_vma_op *op)
2432 lockdep_assert_held_write(&vm->lock);
2434 err = xe_vm_lock_vma(exec, vma);
2438 xe_vm_assert_held(vm);
2439 xe_bo_assert_held(xe_vma_bo(vma));
2441 switch (op->base.op) {
2442 case DRM_GPUVA_OP_MAP:
2443 err = xe_vm_bind(vm, vma, op->q, xe_vma_bo(vma),
2444 op->syncs, op->num_syncs,
2445 op->map.immediate || !xe_vm_in_fault_mode(vm),
2446 op->flags & XE_VMA_OP_FIRST,
2447 op->flags & XE_VMA_OP_LAST);
2449 case DRM_GPUVA_OP_REMAP:
2451 bool prev = !!op->remap.prev;
2452 bool next = !!op->remap.next;
2454 if (!op->remap.unmap_done) {
2456 vma->gpuva.flags |= XE_VMA_FIRST_REBIND;
2457 err = xe_vm_unbind(vm, vma, op->q, op->syncs,
2459 op->flags & XE_VMA_OP_FIRST,
2460 op->flags & XE_VMA_OP_LAST &&
2464 op->remap.unmap_done = true;
2468 op->remap.prev->gpuva.flags |= XE_VMA_LAST_REBIND;
2469 err = xe_vm_bind(vm, op->remap.prev, op->q,
2470 xe_vma_bo(op->remap.prev), op->syncs,
2471 op->num_syncs, true, false,
2472 op->flags & XE_VMA_OP_LAST && !next);
2473 op->remap.prev->gpuva.flags &= ~XE_VMA_LAST_REBIND;
2476 op->remap.prev = NULL;
2480 op->remap.next->gpuva.flags |= XE_VMA_LAST_REBIND;
2481 err = xe_vm_bind(vm, op->remap.next, op->q,
2482 xe_vma_bo(op->remap.next),
2483 op->syncs, op->num_syncs,
2485 op->flags & XE_VMA_OP_LAST);
2486 op->remap.next->gpuva.flags &= ~XE_VMA_LAST_REBIND;
2489 op->remap.next = NULL;
2494 case DRM_GPUVA_OP_UNMAP:
2495 err = xe_vm_unbind(vm, vma, op->q, op->syncs,
2496 op->num_syncs, op->flags & XE_VMA_OP_FIRST,
2497 op->flags & XE_VMA_OP_LAST);
2499 case DRM_GPUVA_OP_PREFETCH:
2500 err = xe_vm_prefetch(vm, vma, op->q, op->prefetch.region,
2501 op->syncs, op->num_syncs,
2502 op->flags & XE_VMA_OP_FIRST,
2503 op->flags & XE_VMA_OP_LAST);
2506 drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2510 trace_xe_vma_fail(vma);
2515 static int __xe_vma_op_execute(struct xe_vm *vm, struct xe_vma *vma,
2516 struct xe_vma_op *op)
2518 struct drm_exec exec;
2522 drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
2523 drm_exec_until_all_locked(&exec) {
2524 err = op_execute(&exec, vm, vma, op);
2525 drm_exec_retry_on_contention(&exec);
2529 drm_exec_fini(&exec);
2531 if (err == -EAGAIN) {
2532 lockdep_assert_held_write(&vm->lock);
2534 if (op->base.op == DRM_GPUVA_OP_REMAP) {
2535 if (!op->remap.unmap_done)
2536 vma = gpuva_to_vma(op->base.remap.unmap->va);
2537 else if (op->remap.prev)
2538 vma = op->remap.prev;
2540 vma = op->remap.next;
2543 if (xe_vma_is_userptr(vma)) {
2544 err = xe_vma_userptr_pin_pages(to_userptr_vma(vma));
2548 trace_xe_vma_fail(vma);
2555 static int xe_vma_op_execute(struct xe_vm *vm, struct xe_vma_op *op)
2559 lockdep_assert_held_write(&vm->lock);
2561 switch (op->base.op) {
2562 case DRM_GPUVA_OP_MAP:
2563 ret = __xe_vma_op_execute(vm, op->map.vma, op);
2565 case DRM_GPUVA_OP_REMAP:
2569 if (!op->remap.unmap_done)
2570 vma = gpuva_to_vma(op->base.remap.unmap->va);
2571 else if (op->remap.prev)
2572 vma = op->remap.prev;
2574 vma = op->remap.next;
2576 ret = __xe_vma_op_execute(vm, vma, op);
2579 case DRM_GPUVA_OP_UNMAP:
2580 ret = __xe_vma_op_execute(vm, gpuva_to_vma(op->base.unmap.va),
2583 case DRM_GPUVA_OP_PREFETCH:
2584 ret = __xe_vma_op_execute(vm,
2585 gpuva_to_vma(op->base.prefetch.va),
2589 drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2595 static void xe_vma_op_cleanup(struct xe_vm *vm, struct xe_vma_op *op)
2597 bool last = op->flags & XE_VMA_OP_LAST;
2600 while (op->num_syncs--)
2601 xe_sync_entry_cleanup(&op->syncs[op->num_syncs]);
2604 xe_exec_queue_put(op->q);
2606 if (!list_empty(&op->link))
2607 list_del(&op->link);
2609 drm_gpuva_ops_free(&vm->gpuvm, op->ops);
2614 static void xe_vma_op_unwind(struct xe_vm *vm, struct xe_vma_op *op,
2615 bool post_commit, bool prev_post_commit,
2616 bool next_post_commit)
2618 lockdep_assert_held_write(&vm->lock);
2620 switch (op->base.op) {
2621 case DRM_GPUVA_OP_MAP:
2623 prep_vma_destroy(vm, op->map.vma, post_commit);
2624 xe_vma_destroy_unlocked(op->map.vma);
2627 case DRM_GPUVA_OP_UNMAP:
2629 struct xe_vma *vma = gpuva_to_vma(op->base.unmap.va);
2632 down_read(&vm->userptr.notifier_lock);
2633 vma->gpuva.flags &= ~XE_VMA_DESTROYED;
2634 up_read(&vm->userptr.notifier_lock);
2636 xe_vm_insert_vma(vm, vma);
2640 case DRM_GPUVA_OP_REMAP:
2642 struct xe_vma *vma = gpuva_to_vma(op->base.remap.unmap->va);
2644 if (op->remap.prev) {
2645 prep_vma_destroy(vm, op->remap.prev, prev_post_commit);
2646 xe_vma_destroy_unlocked(op->remap.prev);
2648 if (op->remap.next) {
2649 prep_vma_destroy(vm, op->remap.next, next_post_commit);
2650 xe_vma_destroy_unlocked(op->remap.next);
2653 down_read(&vm->userptr.notifier_lock);
2654 vma->gpuva.flags &= ~XE_VMA_DESTROYED;
2655 up_read(&vm->userptr.notifier_lock);
2657 xe_vm_insert_vma(vm, vma);
2661 case DRM_GPUVA_OP_PREFETCH:
2665 drm_warn(&vm->xe->drm, "NOT POSSIBLE");
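/*
 * Walk the per-bind drm_gpuva_ops lists in reverse, unwinding every op
 * that was (partially) committed, then free the ops themselves.
 */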
2669 static void vm_bind_ioctl_ops_unwind(struct xe_vm *vm,
2670 struct drm_gpuva_ops **ops,
2675 for (i = num_ops_list - 1; i >= 0; --i) {
2676 struct drm_gpuva_ops *__ops = ops[i];
2677 struct drm_gpuva_op *__op;
2682 drm_gpuva_for_each_op_reverse(__op, __ops) {
2683 struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
2685 xe_vma_op_unwind(vm, op,
2686 op->flags & XE_VMA_OP_COMMITTED,
2687 op->flags & XE_VMA_OP_PREV_COMMITTED,
2688 op->flags & XE_VMA_OP_NEXT_COMMITTED);
2691 drm_gpuva_ops_free(&vm->gpuvm, __ops);
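/*
 * Execute the parsed ops in submission order, cleaning each one up as it
 * is processed. On failure the VM is killed instead of being unwound (see
 * the FIXME below).
 */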
2695 static int vm_bind_ioctl_ops_execute(struct xe_vm *vm,
2696 struct list_head *ops_list)
2698 struct xe_vma_op *op, *next;
2701 lockdep_assert_held_write(&vm->lock);
2703 list_for_each_entry_safe(op, next, ops_list, link) {
2704 err = xe_vma_op_execute(vm, op);
2706 drm_warn(&vm->xe->drm, "VM op(%d) failed with %d",
2709 * FIXME: Killing VM rather than proper error handling
2714 xe_vma_op_cleanup(vm, op);
2720 #define SUPPORTED_FLAGS \
2721 (DRM_XE_VM_BIND_FLAG_READONLY | \
2722 DRM_XE_VM_BIND_FLAG_IMMEDIATE | \
2723 DRM_XE_VM_BIND_FLAG_NULL | \
2724 DRM_XE_VM_BIND_FLAG_DUMPABLE)
2725 #define XE_64K_PAGE_MASK 0xffffull
2726 #define ALL_DRM_XE_SYNCS_FLAGS (DRM_XE_SYNCS_FLAG_WAIT_FOR_OP)
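/*
 * Validate the ioctl arguments before any locks are taken: copy the
 * bind-op array from userspace when more than one bind is supplied (a
 * single bind is embedded in the args), then check each op's pat_index,
 * flags, op/object combination and page alignment.
 */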
2728 static int vm_bind_ioctl_check_args(struct xe_device *xe,
2729 struct drm_xe_vm_bind *args,
2730 struct drm_xe_vm_bind_op **bind_ops)
2735 if (XE_IOCTL_DBG(xe, args->pad || args->pad2) ||
2736 XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
2739 if (XE_IOCTL_DBG(xe, args->extensions))
2742 if (args->num_binds > 1) {
2743 u64 __user *bind_user =
2744 u64_to_user_ptr(args->vector_of_binds);
2746 *bind_ops = kvmalloc_array(args->num_binds,
2747 sizeof(struct drm_xe_vm_bind_op),
2748 GFP_KERNEL | __GFP_ACCOUNT);
2752 err = __copy_from_user(*bind_ops, bind_user,
2753 sizeof(struct drm_xe_vm_bind_op) *
2755 if (XE_IOCTL_DBG(xe, err)) {
2760 *bind_ops = &args->bind;
2763 for (i = 0; i < args->num_binds; ++i) {
2764 u64 range = (*bind_ops)[i].range;
2765 u64 addr = (*bind_ops)[i].addr;
2766 u32 op = (*bind_ops)[i].op;
2767 u32 flags = (*bind_ops)[i].flags;
2768 u32 obj = (*bind_ops)[i].obj;
2769 u64 obj_offset = (*bind_ops)[i].obj_offset;
2770 u32 prefetch_region = (*bind_ops)[i].prefetch_mem_region_instance;
2771 bool is_null = flags & DRM_XE_VM_BIND_FLAG_NULL;
2772 u16 pat_index = (*bind_ops)[i].pat_index;
2775 if (XE_IOCTL_DBG(xe, pat_index >= xe->pat.n_entries)) {
2780 pat_index = array_index_nospec(pat_index, xe->pat.n_entries);
2781 (*bind_ops)[i].pat_index = pat_index;
2782 coh_mode = xe_pat_index_get_coh_mode(xe, pat_index);
2783 if (XE_IOCTL_DBG(xe, !coh_mode)) { /* hw reserved */
2788 if (XE_WARN_ON(coh_mode > XE_COH_AT_LEAST_1WAY)) {
2793 if (XE_IOCTL_DBG(xe, op > DRM_XE_VM_BIND_OP_PREFETCH) ||
2794 XE_IOCTL_DBG(xe, flags & ~SUPPORTED_FLAGS) ||
2795 XE_IOCTL_DBG(xe, obj && is_null) ||
2796 XE_IOCTL_DBG(xe, obj_offset && is_null) ||
2797 XE_IOCTL_DBG(xe, op != DRM_XE_VM_BIND_OP_MAP &&
2799 XE_IOCTL_DBG(xe, !obj &&
2800 op == DRM_XE_VM_BIND_OP_MAP &&
2802 XE_IOCTL_DBG(xe, !obj &&
2803 op == DRM_XE_VM_BIND_OP_UNMAP_ALL) ||
2804 XE_IOCTL_DBG(xe, addr &&
2805 op == DRM_XE_VM_BIND_OP_UNMAP_ALL) ||
2806 XE_IOCTL_DBG(xe, range &&
2807 op == DRM_XE_VM_BIND_OP_UNMAP_ALL) ||
2808 XE_IOCTL_DBG(xe, obj &&
2809 op == DRM_XE_VM_BIND_OP_MAP_USERPTR) ||
2810 XE_IOCTL_DBG(xe, coh_mode == XE_COH_NONE &&
2811 op == DRM_XE_VM_BIND_OP_MAP_USERPTR) ||
2812 XE_IOCTL_DBG(xe, obj &&
2813 op == DRM_XE_VM_BIND_OP_PREFETCH) ||
2814 XE_IOCTL_DBG(xe, prefetch_region &&
2815 op != DRM_XE_VM_BIND_OP_PREFETCH) ||
2816 XE_IOCTL_DBG(xe, !(BIT(prefetch_region) &
2817 xe->info.mem_region_mask)) ||
2818 XE_IOCTL_DBG(xe, obj &&
2819 op == DRM_XE_VM_BIND_OP_UNMAP)) {
2824 if (XE_IOCTL_DBG(xe, obj_offset & ~PAGE_MASK) ||
2825 XE_IOCTL_DBG(xe, addr & ~PAGE_MASK) ||
2826 XE_IOCTL_DBG(xe, range & ~PAGE_MASK) ||
2827 XE_IOCTL_DBG(xe, !range &&
2828 op != DRM_XE_VM_BIND_OP_UNMAP_ALL)) {
2837 if (args->num_binds > 1)
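/*
 * Used when there is nothing to (un)bind: obtain an in-fence for the wait
 * exec queue via xe_sync_in_fence_get(), signal all user syncs with it and
 * record it as the queue's last fence.
 */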
2842 static int vm_bind_ioctl_signal_fences(struct xe_vm *vm,
2843 struct xe_exec_queue *q,
2844 struct xe_sync_entry *syncs,
2847 struct dma_fence *fence;
2850 fence = xe_sync_in_fence_get(syncs, num_syncs,
2851 to_wait_exec_queue(vm, q), vm);
2853 return PTR_ERR(fence);
2855 for (i = 0; i < num_syncs; i++)
2856 xe_sync_entry_signal(&syncs[i], fence);
2858 xe_exec_queue_last_fence_set(to_wait_exec_queue(vm, q), vm,
2860 dma_fence_put(fence);
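/*
 * Handler for DRM_IOCTL_XE_VM_BIND: validates the arguments, looks up the
 * exec queue and VM, resolves and checks the GEM objects, parses the sync
 * entries, then builds and executes a GPUVA op list per bind op, unwinding
 * everything on failure.
 *
 * A minimal userspace sketch for a single MAP, assuming an open device fd,
 * an existing vm_id and BO handle, a device-valid pat_index, and page
 * aligned addr/range (field names follow struct drm_xe_vm_bind and struct
 * drm_xe_vm_bind_op in xe_drm.h):
 *
 *	struct drm_xe_vm_bind bind = {
 *		.vm_id = vm_id,
 *		.num_binds = 1,
 *		.bind.obj = bo_handle,
 *		.bind.obj_offset = 0,
 *		.bind.range = size,
 *		.bind.addr = gpu_addr,
 *		.bind.op = DRM_XE_VM_BIND_OP_MAP,
 *		.bind.pat_index = pat_index,
 *	};
 *	ioctl(fd, DRM_IOCTL_XE_VM_BIND, &bind);
 */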
2865 int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
2867 struct xe_device *xe = to_xe_device(dev);
2868 struct xe_file *xef = to_xe_file(file);
2869 struct drm_xe_vm_bind *args = data;
2870 struct drm_xe_sync __user *syncs_user;
2871 struct xe_bo **bos = NULL;
2872 struct drm_gpuva_ops **ops = NULL;
2874 struct xe_exec_queue *q = NULL;
2875 u32 num_syncs, num_ufence = 0;
2876 struct xe_sync_entry *syncs = NULL;
2877 struct drm_xe_vm_bind_op *bind_ops;
2878 LIST_HEAD(ops_list);
2882 err = vm_bind_ioctl_check_args(xe, args, &bind_ops);
2886 if (args->exec_queue_id) {
2887 q = xe_exec_queue_lookup(xef, args->exec_queue_id);
2888 if (XE_IOCTL_DBG(xe, !q)) {
2893 if (XE_IOCTL_DBG(xe, !(q->flags & EXEC_QUEUE_FLAG_VM))) {
2895 goto put_exec_queue;
2899 vm = xe_vm_lookup(xef, args->vm_id);
2900 if (XE_IOCTL_DBG(xe, !vm)) {
2902 goto put_exec_queue;
2905 err = down_write_killable(&vm->lock);
2909 if (XE_IOCTL_DBG(xe, xe_vm_is_closed_or_banned(vm))) {
2911 goto release_vm_lock;
2914 for (i = 0; i < args->num_binds; ++i) {
2915 u64 range = bind_ops[i].range;
2916 u64 addr = bind_ops[i].addr;
2918 if (XE_IOCTL_DBG(xe, range > vm->size) ||
2919 XE_IOCTL_DBG(xe, addr > vm->size - range)) {
2921 goto release_vm_lock;
2925 if (args->num_binds) {
2926 bos = kvcalloc(args->num_binds, sizeof(*bos),
2927 GFP_KERNEL | __GFP_ACCOUNT);
2930 goto release_vm_lock;
2933 ops = kvcalloc(args->num_binds, sizeof(*ops),
2934 GFP_KERNEL | __GFP_ACCOUNT);
2937 goto release_vm_lock;
2941 for (i = 0; i < args->num_binds; ++i) {
2942 struct drm_gem_object *gem_obj;
2943 u64 range = bind_ops[i].range;
2944 u64 addr = bind_ops[i].addr;
2945 u32 obj = bind_ops[i].obj;
2946 u64 obj_offset = bind_ops[i].obj_offset;
2947 u16 pat_index = bind_ops[i].pat_index;
2953 gem_obj = drm_gem_object_lookup(file, obj);
2954 if (XE_IOCTL_DBG(xe, !gem_obj)) {
2958 bos[i] = gem_to_xe_bo(gem_obj);
2960 if (XE_IOCTL_DBG(xe, range > bos[i]->size) ||
2961 XE_IOCTL_DBG(xe, obj_offset >
2962 bos[i]->size - range)) {
2967 if (bos[i]->flags & XE_BO_FLAG_INTERNAL_64K) {
2968 if (XE_IOCTL_DBG(xe, obj_offset &
2969 XE_64K_PAGE_MASK) ||
2970 XE_IOCTL_DBG(xe, addr & XE_64K_PAGE_MASK) ||
2971 XE_IOCTL_DBG(xe, range & XE_64K_PAGE_MASK)) {
2977 coh_mode = xe_pat_index_get_coh_mode(xe, pat_index);
2978 if (bos[i]->cpu_caching) {
2979 if (XE_IOCTL_DBG(xe, coh_mode == XE_COH_NONE &&
2980 bos[i]->cpu_caching == DRM_XE_GEM_CPU_CACHING_WB)) {
2984 } else if (XE_IOCTL_DBG(xe, coh_mode == XE_COH_NONE)) {
2986 * Imported dma-buf from a different device should
2987 * require 1-way or 2-way coherency since we don't know
2988 * how it was mapped on the CPU. Just assume it is
2989 * potentially cached on the CPU side.
2996 if (args->num_syncs) {
2997 syncs = kcalloc(args->num_syncs, sizeof(*syncs), GFP_KERNEL);
3004 syncs_user = u64_to_user_ptr(args->syncs);
3005 for (num_syncs = 0; num_syncs < args->num_syncs; num_syncs++) {
3006 err = xe_sync_entry_parse(xe, xef, &syncs[num_syncs],
3007 &syncs_user[num_syncs],
3008 (xe_vm_in_lr_mode(vm) ?
3009 SYNC_PARSE_FLAG_LR_MODE : 0) |
3011 SYNC_PARSE_FLAG_DISALLOW_USER_FENCE : 0));
3015 if (xe_sync_is_ufence(&syncs[num_syncs]))
3019 if (XE_IOCTL_DBG(xe, num_ufence > 1)) {
3024 if (!args->num_binds) {
3029 for (i = 0; i < args->num_binds; ++i) {
3030 u64 range = bind_ops[i].range;
3031 u64 addr = bind_ops[i].addr;
3032 u32 op = bind_ops[i].op;
3033 u32 flags = bind_ops[i].flags;
3034 u64 obj_offset = bind_ops[i].obj_offset;
3035 u32 prefetch_region = bind_ops[i].prefetch_mem_region_instance;
3036 u16 pat_index = bind_ops[i].pat_index;
3038 ops[i] = vm_bind_ioctl_ops_create(vm, bos[i], obj_offset,
3039 addr, range, op, flags,
3040 prefetch_region, pat_index);
3041 if (IS_ERR(ops[i])) {
3042 err = PTR_ERR(ops[i]);
3047 err = vm_bind_ioctl_ops_parse(vm, q, ops[i], syncs, num_syncs,
3049 i == args->num_binds - 1);
3055 if (list_empty(&ops_list)) {
3062 xe_exec_queue_get(q);
3064 err = vm_bind_ioctl_ops_execute(vm, &ops_list);
3066 up_write(&vm->lock);
3069 xe_exec_queue_put(q);
3072 for (i = 0; bos && i < args->num_binds; ++i)
3077 if (args->num_binds > 1)
3083 vm_bind_ioctl_ops_unwind(vm, ops, args->num_binds);
3085 if (err == -ENODATA)
3086 err = vm_bind_ioctl_signal_fences(vm, q, syncs, num_syncs);
3088 xe_sync_entry_cleanup(&syncs[num_syncs]);
3092 for (i = 0; i < args->num_binds; ++i)
3095 up_write(&vm->lock);
3100 xe_exec_queue_put(q);
3104 if (args->num_binds > 1)
3110 * xe_vm_lock() - Lock the vm's dma_resv object
3111 * @vm: The struct xe_vm whose lock is to be locked
3112 * @intr: Whether to wait interruptibly if the lock is contended
3114 * Return: 0 on success, -EINTR if @intr is true and the wait for a
3115 * contended lock was interrupted. If @intr is false, the function
 * always returns 0.
3118 int xe_vm_lock(struct xe_vm *vm, bool intr)
3121 return dma_resv_lock_interruptible(xe_vm_resv(vm), NULL);
3123 return dma_resv_lock(xe_vm_resv(vm), NULL);
3127 * xe_vm_unlock() - Unlock the vm's dma_resv object
3128 * @vm: The struct xe_vm whose lock is to be released.
3130 * Unlock the vm's dma_resv object that was locked by xe_vm_lock().
3132 void xe_vm_unlock(struct xe_vm *vm)
3134 dma_resv_unlock(xe_vm_resv(vm));
3138 * xe_vm_invalidate_vma - invalidate GPU mappings for VMA without a lock
3139 * @vma: VMA to invalidate
3141 * Walks the page-table leaves, zeroing the entries owned by this VMA,
3142 * invalidates the TLBs and blocks until the TLB invalidation is complete.
3145 * Returns 0 for success, negative error code otherwise.
3147 int xe_vm_invalidate_vma(struct xe_vma *vma)
3149 struct xe_device *xe = xe_vma_vm(vma)->xe;
3150 struct xe_tile *tile;
3151 u32 tile_needs_invalidate = 0;
3152 int seqno[XE_MAX_TILES_PER_DEVICE];
3156 xe_assert(xe, !xe_vma_is_null(vma));
3157 trace_xe_vma_invalidate(vma);
3159 vm_dbg(&xe_vma_vm(vma)->xe->drm,
3160 "INVALIDATE: addr=0x%016llx, range=0x%016llx",
3161 xe_vma_start(vma), xe_vma_size(vma));
3163 /* Check that we don't race with page-table updates */
3164 if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
3165 if (xe_vma_is_userptr(vma)) {
3166 WARN_ON_ONCE(!mmu_interval_check_retry
3167 (&to_userptr_vma(vma)->userptr.notifier,
3168 to_userptr_vma(vma)->userptr.notifier_seq));
3169 WARN_ON_ONCE(!dma_resv_test_signaled(xe_vm_resv(xe_vma_vm(vma)),
3170 DMA_RESV_USAGE_BOOKKEEP));
3173 xe_bo_assert_held(xe_vma_bo(vma));
3177 for_each_tile(tile, xe, id) {
3178 if (xe_pt_zap_ptes(tile, vma)) {
3179 tile_needs_invalidate |= BIT(id);
3182 * FIXME: We potentially need to invalidate multiple
3183 * GTs within the tile
3185 seqno[id] = xe_gt_tlb_invalidation_vma(tile->primary_gt, NULL, vma);
3191 for_each_tile(tile, xe, id) {
3192 if (tile_needs_invalidate & BIT(id)) {
3193 ret = xe_gt_tlb_invalidation_wait(tile->primary_gt, seqno[id]);
3199 vma->tile_invalidated = vma->tile_mask;
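/*
 * Debug helper: dump the VM's root page-table address and, for every VMA,
 * its range, size, first backing address and type (NULL, userptr, VRAM or
 * system memory) for the given GT.
 */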
3204 int xe_analyze_vm(struct drm_printer *p, struct xe_vm *vm, int gt_id)
3206 struct drm_gpuva *gpuva;
3210 if (!down_read_trylock(&vm->lock)) {
3211 drm_printf(p, " Failed to acquire VM lock to dump capture");
3214 if (vm->pt_root[gt_id]) {
3215 addr = xe_bo_addr(vm->pt_root[gt_id]->bo, 0, XE_PAGE_SIZE);
3216 is_vram = xe_bo_is_vram(vm->pt_root[gt_id]->bo);
3217 drm_printf(p, " VM root: A:0x%llx %s\n", addr,
3218 is_vram ? "VRAM" : "SYS");
3221 drm_gpuvm_for_each_va(gpuva, &vm->gpuvm) {
3222 struct xe_vma *vma = gpuva_to_vma(gpuva);
3223 bool is_userptr = xe_vma_is_userptr(vma);
3224 bool is_null = xe_vma_is_null(vma);
3228 } else if (is_userptr) {
3229 struct sg_table *sg = to_userptr_vma(vma)->userptr.sg;
3230 struct xe_res_cursor cur;
3233 xe_res_first_sg(sg, 0, XE_PAGE_SIZE, &cur);
3234 addr = xe_res_dma(&cur);
3239 addr = __xe_bo_addr(xe_vma_bo(vma), 0, XE_PAGE_SIZE);
3240 is_vram = xe_bo_is_vram(xe_vma_bo(vma));
3242 drm_printf(p, " [%016llx-%016llx] S:0x%016llx A:%016llx %s\n",
3243 xe_vma_start(vma), xe_vma_end(vma) - 1,
3245 addr, is_null ? "NULL" : is_userptr ? "USR" :
3246 is_vram ? "VRAM" : "SYS");
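/*
 * Snapshot of the dumpable VMAs in a VM for error-state capture. The
 * layout is recorded under snap_mutex with references taken on the backing
 * BOs / mm; the contents are copied later by
 * xe_vm_snapshot_capture_delayed() from a context that may sleep.
 */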
3253 struct xe_vm_snapshot {
3254 unsigned long num_snaps;
3260 struct mm_struct *mm;
3264 struct xe_vm_snapshot *xe_vm_snapshot_capture(struct xe_vm *vm)
3266 unsigned long num_snaps = 0, i;
3267 struct xe_vm_snapshot *snap = NULL;
3268 struct drm_gpuva *gpuva;
3273 mutex_lock(&vm->snap_mutex);
3274 drm_gpuvm_for_each_va(gpuva, &vm->gpuvm) {
3275 if (gpuva->flags & XE_VMA_DUMPABLE)
3280 snap = kvzalloc(offsetof(struct xe_vm_snapshot, snap[num_snaps]), GFP_NOWAIT);
3282 snap = num_snaps ? ERR_PTR(-ENOMEM) : ERR_PTR(-ENODEV);
3286 snap->num_snaps = num_snaps;
3288 drm_gpuvm_for_each_va(gpuva, &vm->gpuvm) {
3289 struct xe_vma *vma = gpuva_to_vma(gpuva);
3290 struct xe_bo *bo = vma->gpuva.gem.obj ?
3291 gem_to_xe_bo(vma->gpuva.gem.obj) : NULL;
3293 if (!(gpuva->flags & XE_VMA_DUMPABLE))
3296 snap->snap[i].ofs = xe_vma_start(vma);
3297 snap->snap[i].len = xe_vma_size(vma);
3299 snap->snap[i].bo = xe_bo_get(bo);
3300 snap->snap[i].bo_ofs = xe_vma_bo_offset(vma);
3301 } else if (xe_vma_is_userptr(vma)) {
3302 struct mm_struct *mm =
3303 to_userptr_vma(vma)->userptr.notifier.mm;
3305 if (mmget_not_zero(mm))
3306 snap->snap[i].mm = mm;
3308 snap->snap[i].data = ERR_PTR(-EFAULT);
3310 snap->snap[i].bo_ofs = xe_vma_userptr(vma);
3312 snap->snap[i].data = ERR_PTR(-ENOENT);
3318 mutex_unlock(&vm->snap_mutex);
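/*
 * Copy the contents of each snapshotted region: BO-backed ranges are read
 * through ttm_bo_vmap() under the BO's dma-resv lock, userptr ranges via
 * copy_from_user() with the captured mm made current through
 * kthread_use_mm(). Failures are stored as an ERR_PTR in place of the data.
 */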
3322 void xe_vm_snapshot_capture_delayed(struct xe_vm_snapshot *snap)
3324 if (IS_ERR_OR_NULL(snap))
3327 for (int i = 0; i < snap->num_snaps; i++) {
3328 struct xe_bo *bo = snap->snap[i].bo;
3329 struct iosys_map src;
3332 if (IS_ERR(snap->snap[i].data))
3335 snap->snap[i].data = kvmalloc(snap->snap[i].len, GFP_USER);
3336 if (!snap->snap[i].data) {
3337 snap->snap[i].data = ERR_PTR(-ENOMEM);
3342 dma_resv_lock(bo->ttm.base.resv, NULL);
3343 err = ttm_bo_vmap(&bo->ttm, &src);
3345 xe_map_memcpy_from(xe_bo_device(bo),
3347 &src, snap->snap[i].bo_ofs,
3349 ttm_bo_vunmap(&bo->ttm, &src);
3351 dma_resv_unlock(bo->ttm.base.resv);
3353 void __user *userptr = (void __user *)(size_t)snap->snap[i].bo_ofs;
3355 kthread_use_mm(snap->snap[i].mm);
3356 if (!copy_from_user(snap->snap[i].data, userptr, snap->snap[i].len))
3360 kthread_unuse_mm(snap->snap[i].mm);
3362 mmput(snap->snap[i].mm);
3363 snap->snap[i].mm = NULL;
3367 kvfree(snap->snap[i].data);
3368 snap->snap[i].data = ERR_PTR(err);
3373 snap->snap[i].bo = NULL;
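/*
 * Print a captured snapshot: a header line per region followed by its
 * contents, ascii85-encoded one u32 at a time.
 */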
3377 void xe_vm_snapshot_print(struct xe_vm_snapshot *snap, struct drm_printer *p)
3381 if (IS_ERR_OR_NULL(snap)) {
3382 drm_printf(p, "[0].error: %li\n", PTR_ERR(snap));
3386 for (i = 0; i < snap->num_snaps; i++) {
3387 drm_printf(p, "[%llx].length: 0x%lx\n", snap->snap[i].ofs, snap->snap[i].len);
3389 if (IS_ERR(snap->snap[i].data)) {
3390 drm_printf(p, "[%llx].error: %li\n", snap->snap[i].ofs,
3391 PTR_ERR(snap->snap[i].data));
3395 drm_printf(p, "[%llx].data: ", snap->snap[i].ofs);
3397 for (j = 0; j < snap->snap[i].len; j += sizeof(u32)) {
3398 u32 *val = snap->snap[i].data + j;
3399 char dumped[ASCII85_BUFSZ];
3401 drm_puts(p, ascii85_encode(*val, dumped));
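/*
 * Free a snapshot: the copied data buffers, the BO references and any mm
 * references still held.
 */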
3408 void xe_vm_snapshot_free(struct xe_vm_snapshot *snap)
3412 if (IS_ERR_OR_NULL(snap))
3415 for (i = 0; i < snap->num_snaps; i++) {
3416 if (!IS_ERR(snap->snap[i].data))
3417 kvfree(snap->snap[i].data);
3418 xe_bo_put(snap->snap[i].bo);
3419 if (snap->snap[i].mm)
3420 mmput(snap->snap[i].mm);