/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */

#include <linux/dma-fence-array.h>
#include <linux/interval_tree_generic.h>
#include <linux/idr.h>
#include <linux/dma-buf.h>

#include <drm/amdgpu_drm.h>
#include <drm/drm_drv.h>
#include <drm/ttm/ttm_tt.h>
#include <drm/drm_exec.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_gmc.h"
#include "amdgpu_xgmi.h"
#include "amdgpu_dma_buf.h"
#include "amdgpu_res_cursor.h"
#include "kfd_svm.h"

/**
 * DOC: GPUVM
 *
 * GPUVM is the MMU functionality provided on the GPU.
 * GPUVM is similar to the legacy GART on older ASICs, however
 * rather than there being a single global GART table
 * for the entire GPU, there can be multiple GPUVM page tables active
 * at any given time.  The GPUVM page tables can contain a mix of
 * VRAM pages and system pages (both memory and MMIO) and system pages
 * can be mapped as snooped (cached system pages) or unsnooped
 * (uncached system pages).
 *
 * Each active GPUVM has an ID associated with it and there is a page table
 * linked with each VMID.  When executing a command buffer,
 * the kernel tells the engine what VMID to use for that command
 * buffer.  VMIDs are allocated dynamically as commands are submitted.
 * The userspace drivers maintain their own address space and the kernel
 * sets up their page tables accordingly when they submit their
 * command buffers and a VMID is assigned.
 * The hardware supports up to 16 active GPUVMs at any given time.
 *
 * Each GPUVM is represented by a 1-2 or 1-5 level page table, depending
 * on the ASIC family.  GPUVM supports RWX attributes on each page as well
 * as other features such as encryption and caching attributes.
 *
 * VMID 0 is special.  It is the GPUVM used for the kernel driver.  In
 * addition to an aperture managed by a page table, VMID 0 also has
 * several other apertures.  There is an aperture for direct access to VRAM
 * and there is a legacy AGP aperture which just forwards accesses directly
 * to the matching system physical addresses (or IOVAs when an IOMMU is
 * present).  These apertures provide direct access to these memories without
 * incurring the overhead of a page table.  VMID 0 is used by the kernel
 * driver for tasks like memory management.
 *
 * GPU clients (i.e., engines on the GPU) use GPUVM VMIDs to access memory.
 * For user applications, each application can have its own unique GPUVM
 * address space.  The application manages the address space and the kernel
 * driver manages the GPUVM page tables for each process.  If a GPU client
 * accesses an invalid page, it will generate a GPU page fault, similar to
 * accessing an invalid page on a CPU.
 */

#define START(node) ((node)->start)
#define LAST(node) ((node)->last)

INTERVAL_TREE_DEFINE(struct amdgpu_bo_va_mapping, rb, uint64_t, __subtree_last,
                     START, LAST, static, amdgpu_vm_it)

#undef START
#undef LAST

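/*
 * The INTERVAL_TREE_DEFINE() above generates amdgpu_vm_it_iter_first() and
 * amdgpu_vm_it_iter_next(). An illustrative sketch (not part of the driver)
 * of walking every mapping that intersects [start, last] in a VM:
 *
 *	struct amdgpu_bo_va_mapping *m;
 *
 *	for (m = amdgpu_vm_it_iter_first(&vm->va, start, last); m;
 *	     m = amdgpu_vm_it_iter_next(m, start, last))
 *		handle_mapping(m);	(handle_mapping() is hypothetical)
 */
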
/**
 * struct amdgpu_prt_cb - Helper to disable partial resident texture feature from a fence callback
 */
struct amdgpu_prt_cb {
        /**
         * @adev: amdgpu device
         */
        struct amdgpu_device *adev;

        /**
         * @cb: callback
         */
        struct dma_fence_cb cb;
};

/**
 * struct amdgpu_vm_tlb_seq_struct - Helper to increment the TLB flush sequence
 */
struct amdgpu_vm_tlb_seq_struct {
        /**
         * @vm: pointer to the amdgpu_vm structure to set the fence sequence on
         */
        struct amdgpu_vm *vm;

        /**
         * @cb: callback
         */
        struct dma_fence_cb cb;
};

/**
 * amdgpu_vm_set_pasid - manage pasid and vm ptr mapping
 *
 * @adev: amdgpu_device pointer
 * @vm: amdgpu_vm pointer
 * @pasid: the pasid the VM is using on this GPU
 *
 * Set the pasid this VM is using on this GPU; this can also be used to remove
 * the pasid by passing in zero.
 */
int amdgpu_vm_set_pasid(struct amdgpu_device *adev, struct amdgpu_vm *vm,
                        u32 pasid)
{
        int r;

        if (vm->pasid == pasid)
                return 0;

        if (vm->pasid) {
                r = xa_err(xa_erase_irq(&adev->vm_manager.pasids, vm->pasid));
                if (r < 0)
                        return r;

                vm->pasid = 0;
        }

        if (pasid) {
                r = xa_err(xa_store_irq(&adev->vm_manager.pasids, pasid, vm,
                                        GFP_KERNEL));
                if (r < 0)
                        return r;

                vm->pasid = pasid;
        }

        return 0;
}

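/*
 * Illustrative sketch (not part of the driver) of the expected call pattern:
 * attach a pasid when the VM starts using this GPU and detach it again by
 * passing zero, e.g. on teardown:
 *
 *	r = amdgpu_vm_set_pasid(adev, vm, pasid);
 *	if (r)
 *		goto error;
 *	...
 *	r = amdgpu_vm_set_pasid(adev, vm, 0);
 */
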
/**
 * amdgpu_vm_bo_evicted - vm_bo is evicted
 *
 * @vm_bo: vm_bo which is evicted
 *
 * State for PDs/PTs and per VM BOs which are not at the location they should
 * be.
 */
static void amdgpu_vm_bo_evicted(struct amdgpu_vm_bo_base *vm_bo)
{
        struct amdgpu_vm *vm = vm_bo->vm;
        struct amdgpu_bo *bo = vm_bo->bo;

        vm_bo->moved = true;
        spin_lock(&vm_bo->vm->status_lock);
        if (bo->tbo.type == ttm_bo_type_kernel)
                list_move(&vm_bo->vm_status, &vm->evicted);
        else
                list_move_tail(&vm_bo->vm_status, &vm->evicted);
        spin_unlock(&vm_bo->vm->status_lock);
}

/**
 * amdgpu_vm_bo_moved - vm_bo is moved
 *
 * @vm_bo: vm_bo which is moved
 *
 * State for per VM BOs which are moved, but that change is not yet reflected
 * in the page tables.
 */
static void amdgpu_vm_bo_moved(struct amdgpu_vm_bo_base *vm_bo)
{
        spin_lock(&vm_bo->vm->status_lock);
        list_move(&vm_bo->vm_status, &vm_bo->vm->moved);
        spin_unlock(&vm_bo->vm->status_lock);
}

/**
 * amdgpu_vm_bo_idle - vm_bo is idle
 *
 * @vm_bo: vm_bo which is now idle
 *
 * State for PDs/PTs and per VM BOs which have gone through the state machine
 * and are now idle.
 */
static void amdgpu_vm_bo_idle(struct amdgpu_vm_bo_base *vm_bo)
{
        spin_lock(&vm_bo->vm->status_lock);
        list_move(&vm_bo->vm_status, &vm_bo->vm->idle);
        spin_unlock(&vm_bo->vm->status_lock);
        vm_bo->moved = false;
}

/**
 * amdgpu_vm_bo_invalidated - vm_bo is invalidated
 *
 * @vm_bo: vm_bo which is now invalidated
 *
 * State for normal BOs which are invalidated but whose change is not yet
 * reflected in the PTs.
 */
static void amdgpu_vm_bo_invalidated(struct amdgpu_vm_bo_base *vm_bo)
{
        spin_lock(&vm_bo->vm->status_lock);
        list_move(&vm_bo->vm_status, &vm_bo->vm->invalidated);
        spin_unlock(&vm_bo->vm->status_lock);
}

/**
 * amdgpu_vm_bo_evicted_user - vm_bo is evicted
 *
 * @vm_bo: vm_bo which is evicted
 *
 * State for BOs used by user mode queues which are not at the location they
 * should be.
 */
static void amdgpu_vm_bo_evicted_user(struct amdgpu_vm_bo_base *vm_bo)
{
        vm_bo->moved = true;
        spin_lock(&vm_bo->vm->status_lock);
        list_move(&vm_bo->vm_status, &vm_bo->vm->evicted_user);
        spin_unlock(&vm_bo->vm->status_lock);
}

/**
 * amdgpu_vm_bo_relocated - vm_bo is relocated
 *
 * @vm_bo: vm_bo which is relocated
 *
 * State for PDs/PTs which need to update their parent PD.
 * For the root PD, just move to idle state.
 */
static void amdgpu_vm_bo_relocated(struct amdgpu_vm_bo_base *vm_bo)
{
        if (vm_bo->bo->parent) {
                spin_lock(&vm_bo->vm->status_lock);
                list_move(&vm_bo->vm_status, &vm_bo->vm->relocated);
                spin_unlock(&vm_bo->vm->status_lock);
        } else {
                amdgpu_vm_bo_idle(vm_bo);
        }
}

/**
 * amdgpu_vm_bo_done - vm_bo is done
 *
 * @vm_bo: vm_bo which is now done
 *
 * State for normal BOs which were invalidated and whose change has already
 * been updated in the PTs.
 */
static void amdgpu_vm_bo_done(struct amdgpu_vm_bo_base *vm_bo)
{
        spin_lock(&vm_bo->vm->status_lock);
        list_move(&vm_bo->vm_status, &vm_bo->vm->done);
        spin_unlock(&vm_bo->vm->status_lock);
}

/**
 * amdgpu_vm_bo_reset_state_machine - reset the vm_bo state machine
 * @vm: the VM which state machine to reset
 *
 * Move all vm_bo objects in the VM into a state where they will be updated
 * again during validation.
 */
static void amdgpu_vm_bo_reset_state_machine(struct amdgpu_vm *vm)
{
        struct amdgpu_vm_bo_base *vm_bo, *tmp;

        spin_lock(&vm->status_lock);
        list_splice_init(&vm->done, &vm->invalidated);
        list_for_each_entry(vm_bo, &vm->invalidated, vm_status)
                vm_bo->moved = true;
        list_for_each_entry_safe(vm_bo, tmp, &vm->idle, vm_status) {
                struct amdgpu_bo *bo = vm_bo->bo;

                vm_bo->moved = true;
                if (!bo || bo->tbo.type != ttm_bo_type_kernel)
                        list_move(&vm_bo->vm_status, &vm_bo->vm->moved);
                else if (bo->parent)
                        list_move(&vm_bo->vm_status, &vm_bo->vm->relocated);
        }
        spin_unlock(&vm->status_lock);
}

/**
 * amdgpu_vm_bo_base_init - Adds bo to the list of bos associated with the vm
 *
 * @base: base structure for tracking BO usage in a VM
 * @vm: vm to which bo is to be added
 * @bo: amdgpu buffer object
 *
 * Initialize a bo_va_base structure and add it to the appropriate lists
 */
void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
                            struct amdgpu_vm *vm, struct amdgpu_bo *bo)
{
        base->vm = vm;
        base->bo = bo;
        base->next = NULL;
        INIT_LIST_HEAD(&base->vm_status);

        if (!bo)
                return;
        base->next = bo->vm_bo;
        bo->vm_bo = base;

        if (!amdgpu_vm_is_bo_always_valid(vm, bo))
                return;

        dma_resv_assert_held(vm->root.bo->tbo.base.resv);

        ttm_bo_set_bulk_move(&bo->tbo, &vm->lru_bulk_move);
        if (bo->tbo.type == ttm_bo_type_kernel && bo->parent)
                amdgpu_vm_bo_relocated(base);
        else
                amdgpu_vm_bo_idle(base);

        if (bo->preferred_domains &
            amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type))
                return;

        /*
         * We checked all the prerequisites, but it looks like this per VM BO
         * is currently evicted. Add the BO to the evicted list to make sure
         * it is validated on next VM use to avoid fault.
         */
        amdgpu_vm_bo_evicted(base);
}

/**
 * amdgpu_vm_lock_pd - lock PD in drm_exec
 *
 * @vm: vm providing the BOs
 * @exec: drm execution context
 * @num_fences: number of extra fences to reserve
 *
 * Lock the VM root PD in the DRM execution context.
 */
int amdgpu_vm_lock_pd(struct amdgpu_vm *vm, struct drm_exec *exec,
                      unsigned int num_fences)
{
        /* We need at least two fences for the VM PD/PT updates */
        return drm_exec_prepare_obj(exec, &vm->root.bo->tbo.base,
                                    2 + num_fences);
}

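/*
 * Illustrative sketch (not part of the driver) of locking the root PD
 * through drm_exec, retrying on contention as the drm_exec pattern requires:
 *
 *	struct drm_exec exec;
 *	int r;
 *
 *	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
 *	drm_exec_until_all_locked(&exec) {
 *		r = amdgpu_vm_lock_pd(vm, &exec, 0);
 *		drm_exec_retry_on_contention(&exec);
 *		if (unlikely(r))
 *			goto error;
 *	}
 */
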
/**
 * amdgpu_vm_move_to_lru_tail - move all BOs to the end of LRU
 *
 * @adev: amdgpu device pointer
 * @vm: vm providing the BOs
 *
 * Move all BOs to the end of LRU and remember their positions to put them
 * together.
 */
void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
                                struct amdgpu_vm *vm)
{
        spin_lock(&adev->mman.bdev.lru_lock);
        ttm_lru_bulk_move_tail(&vm->lru_bulk_move);
        spin_unlock(&adev->mman.bdev.lru_lock);
}

/* Create scheduler entities for page table updates */
static int amdgpu_vm_init_entities(struct amdgpu_device *adev,
                                   struct amdgpu_vm *vm)
{
        int r;

        r = drm_sched_entity_init(&vm->immediate, DRM_SCHED_PRIORITY_NORMAL,
                                  adev->vm_manager.vm_pte_scheds,
                                  adev->vm_manager.vm_pte_num_scheds, NULL);
        if (r)
                goto error;

        return drm_sched_entity_init(&vm->delayed, DRM_SCHED_PRIORITY_NORMAL,
                                     adev->vm_manager.vm_pte_scheds,
                                     adev->vm_manager.vm_pte_num_scheds, NULL);

error:
        drm_sched_entity_destroy(&vm->immediate);
        return r;
}

/* Destroy the entities for page table updates again */
static void amdgpu_vm_fini_entities(struct amdgpu_vm *vm)
{
        drm_sched_entity_destroy(&vm->immediate);
        drm_sched_entity_destroy(&vm->delayed);
}

/**
 * amdgpu_vm_generation - return the page table re-generation counter
 * @adev: the amdgpu_device
 * @vm: optional VM to check, might be NULL
 *
 * Returns a page table re-generation token to allow checking if submissions
 * are still valid to use this VM. The VM parameter might be NULL in which case
 * just the VRAM lost counter will be used.
 */
uint64_t amdgpu_vm_generation(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
        uint64_t result = (u64)atomic_read(&adev->vram_lost_counter) << 32;

        if (!vm)
                return result;

        result += lower_32_bits(vm->generation);
        /* Add one if the page tables will be re-generated on next CS */
        if (drm_sched_entity_error(&vm->delayed))
                ++result;

        return result;
}

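/*
 * Illustrative sketch (not part of the driver): a caller can snapshot the
 * generation while preparing work and later compare it to detect that the
 * page tables were re-generated in between (the error code is a
 * caller-chosen assumption):
 *
 *	uint64_t gen = amdgpu_vm_generation(adev, vm);
 *	...
 *	if (gen != amdgpu_vm_generation(adev, vm))
 *		return -EAGAIN;
 */
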
/**
 * amdgpu_vm_validate - validate evicted BOs tracked in the VM
 *
 * @adev: amdgpu device pointer
 * @vm: vm providing the BOs
 * @ticket: optional reservation ticket used to reserve the VM
 * @validate: callback to do the validation
 * @param: parameter for the validation callback
 *
 * Validate the page table BOs and per-VM BOs on command submission if
 * necessary. If a ticket is given, also try to validate evicted user queue
 * BOs. They must already be reserved with the given ticket.
 *
 * Returns:
 * Validation result.
 */
int amdgpu_vm_validate(struct amdgpu_device *adev, struct amdgpu_vm *vm,
                       struct ww_acquire_ctx *ticket,
                       int (*validate)(void *p, struct amdgpu_bo *bo),
                       void *param)
{
        uint64_t new_vm_generation = amdgpu_vm_generation(adev, vm);
        struct amdgpu_vm_bo_base *bo_base;
        struct amdgpu_bo *shadow;
        struct amdgpu_bo *bo;
        int r;

        if (vm->generation != new_vm_generation) {
                vm->generation = new_vm_generation;
                amdgpu_vm_bo_reset_state_machine(vm);
                amdgpu_vm_fini_entities(vm);
                r = amdgpu_vm_init_entities(adev, vm);
                if (r)
                        return r;
        }

        spin_lock(&vm->status_lock);
        while (!list_empty(&vm->evicted)) {
                bo_base = list_first_entry(&vm->evicted,
                                           struct amdgpu_vm_bo_base,
                                           vm_status);
                spin_unlock(&vm->status_lock);

                bo = bo_base->bo;
                shadow = amdgpu_bo_shadowed(bo);

                r = validate(param, bo);
                if (r)
                        return r;
                if (shadow) {
                        r = validate(param, shadow);
                        if (r)
                                return r;
                }

                if (bo->tbo.type != ttm_bo_type_kernel) {
                        amdgpu_vm_bo_moved(bo_base);
                } else {
                        vm->update_funcs->map_table(to_amdgpu_bo_vm(bo));
                        amdgpu_vm_bo_relocated(bo_base);
                }
                spin_lock(&vm->status_lock);
        }
        while (ticket && !list_empty(&vm->evicted_user)) {
                bo_base = list_first_entry(&vm->evicted_user,
                                           struct amdgpu_vm_bo_base,
                                           vm_status);
                spin_unlock(&vm->status_lock);

                bo = bo_base->bo;

                if (dma_resv_locking_ctx(bo->tbo.base.resv) != ticket) {
                        struct amdgpu_task_info *ti = amdgpu_vm_get_task_info_vm(vm);

                        pr_warn_ratelimited("Evicted user BO is not reserved\n");
                        if (ti) {
                                pr_warn_ratelimited("pid %d\n", ti->pid);
                                amdgpu_vm_put_task_info(ti);
                        }

                        return -EINVAL;
                }

                r = validate(param, bo);
                if (r)
                        return r;

                amdgpu_vm_bo_invalidated(bo_base);

                spin_lock(&vm->status_lock);
        }
        spin_unlock(&vm->status_lock);

        amdgpu_vm_eviction_lock(vm);
        vm->evicting = false;
        amdgpu_vm_eviction_unlock(vm);

        return 0;
}

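/*
 * Illustrative sketch (not part of the driver) of a minimal validation
 * callback for amdgpu_vm_validate(), assuming the BO's placement was
 * already set up by the caller:
 *
 *	static int example_validate(void *param, struct amdgpu_bo *bo)
 *	{
 *		struct ttm_operation_ctx ctx = { true, false };
 *
 *		return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 *	}
 *
 *	r = amdgpu_vm_validate(adev, vm, NULL, example_validate, NULL);
 */
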
/**
 * amdgpu_vm_ready - check VM is ready for updates
 *
 * @vm: VM to check
 *
 * Check if all VM PDs/PTs are ready for updates
 *
 * Returns:
 * True if VM is not evicting and no evicted BOs are pending validation.
 */
bool amdgpu_vm_ready(struct amdgpu_vm *vm)
{
        bool empty;
        bool ret;

        amdgpu_vm_eviction_lock(vm);
        ret = !vm->evicting;
        amdgpu_vm_eviction_unlock(vm);

        spin_lock(&vm->status_lock);
        empty = list_empty(&vm->evicted);
        spin_unlock(&vm->status_lock);

        return ret && empty;
}

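/*
 * Illustrative sketch (not part of the driver): callers typically skip page
 * table updates while the VM is not ready and retry on a later submission:
 *
 *	if (!amdgpu_vm_ready(vm))
 *		return;
 */
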
/**
 * amdgpu_vm_check_compute_bug - check whether asic has compute vm bug
 *
 * @adev: amdgpu_device pointer
 */
void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev)
{
        const struct amdgpu_ip_block *ip_block;
        bool has_compute_vm_bug;
        struct amdgpu_ring *ring;
        int i;

        has_compute_vm_bug = false;

        ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
        if (ip_block) {
                /* Compute has a VM bug for GFX version < 7.
                 * Compute has a VM bug for GFX 8 MEC firmware version < 673.
                 */
                if (ip_block->version->major <= 7)
                        has_compute_vm_bug = true;
                else if (ip_block->version->major == 8)
                        if (adev->gfx.mec_fw_version < 673)
                                has_compute_vm_bug = true;
        }

        for (i = 0; i < adev->num_rings; i++) {
                ring = adev->rings[i];
                if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
                        /* only compute rings */
                        ring->has_compute_vm_bug = has_compute_vm_bug;
                else
                        ring->has_compute_vm_bug = false;
        }
}

/**
 * amdgpu_vm_need_pipeline_sync - Check if pipe sync is needed for job.
 *
 * @ring: ring on which the job will be submitted
 * @job: job to submit
 *
 * Returns:
 * True if sync is needed.
 */
bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
                                  struct amdgpu_job *job)
{
        struct amdgpu_device *adev = ring->adev;
        unsigned vmhub = ring->vm_hub;
        struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];

        if (job->vmid == 0)
                return false;

        if (job->vm_needs_flush || ring->has_compute_vm_bug)
                return true;

        if (ring->funcs->emit_gds_switch && job->gds_switch_needed)
                return true;

        if (amdgpu_vmid_had_gpu_reset(adev, &id_mgr->ids[job->vmid]))
                return true;

        return false;
}

/**
 * amdgpu_vm_flush - hardware flush the vm
 *
 * @ring: ring to use for flush
 * @job:  related job
 * @need_pipe_sync: is pipe sync needed
 *
 * Emit a VM flush when it is necessary.
 *
 * Returns:
 * 0 on success, errno otherwise.
 */
int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
                    bool need_pipe_sync)
{
        struct amdgpu_device *adev = ring->adev;
        unsigned vmhub = ring->vm_hub;
        struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
        struct amdgpu_vmid *id = &id_mgr->ids[job->vmid];
        bool spm_update_needed = job->spm_update_needed;
        bool gds_switch_needed = ring->funcs->emit_gds_switch &&
                job->gds_switch_needed;
        bool vm_flush_needed = job->vm_needs_flush;
        struct dma_fence *fence = NULL;
        bool pasid_mapping_needed = false;
        unsigned int patch;
        int r;

        if (amdgpu_vmid_had_gpu_reset(adev, id)) {
                gds_switch_needed = true;
                vm_flush_needed = true;
                pasid_mapping_needed = true;
                spm_update_needed = true;
        }

        mutex_lock(&id_mgr->lock);
        if (id->pasid != job->pasid || !id->pasid_mapping ||
            !dma_fence_is_signaled(id->pasid_mapping))
                pasid_mapping_needed = true;
        mutex_unlock(&id_mgr->lock);

        gds_switch_needed &= !!ring->funcs->emit_gds_switch;
        vm_flush_needed &= !!ring->funcs->emit_vm_flush &&
                        job->vm_pd_addr != AMDGPU_BO_INVALID_OFFSET;
        pasid_mapping_needed &= adev->gmc.gmc_funcs->emit_pasid_mapping &&
                ring->funcs->emit_wreg;

        if (adev->gfx.enable_cleaner_shader &&
            ring->funcs->emit_cleaner_shader &&
            job->enforce_isolation)
                ring->funcs->emit_cleaner_shader(ring);

        if (!vm_flush_needed && !gds_switch_needed && !need_pipe_sync)
                return 0;

        amdgpu_ring_ib_begin(ring);
        if (ring->funcs->init_cond_exec)
                patch = amdgpu_ring_init_cond_exec(ring,
                                                   ring->cond_exe_gpu_addr);

        if (need_pipe_sync)
                amdgpu_ring_emit_pipeline_sync(ring);

        if (vm_flush_needed) {
                trace_amdgpu_vm_flush(ring, job->vmid, job->vm_pd_addr);
                amdgpu_ring_emit_vm_flush(ring, job->vmid, job->vm_pd_addr);
        }

        if (pasid_mapping_needed)
                amdgpu_gmc_emit_pasid_mapping(ring, job->vmid, job->pasid);

        if (spm_update_needed && adev->gfx.rlc.funcs->update_spm_vmid)
                adev->gfx.rlc.funcs->update_spm_vmid(adev, ring, job->vmid);

        if (!ring->is_mes_queue && ring->funcs->emit_gds_switch &&
            gds_switch_needed) {
                amdgpu_ring_emit_gds_switch(ring, job->vmid, job->gds_base,
                                            job->gds_size, job->gws_base,
                                            job->gws_size, job->oa_base,
                                            job->oa_size);
        }

        if (vm_flush_needed || pasid_mapping_needed) {
                r = amdgpu_fence_emit(ring, &fence, NULL, 0);
                if (r)
                        return r;
        }

        if (vm_flush_needed) {
                mutex_lock(&id_mgr->lock);
                dma_fence_put(id->last_flush);
                id->last_flush = dma_fence_get(fence);
                id->current_gpu_reset_count =
                        atomic_read(&adev->gpu_reset_counter);
                mutex_unlock(&id_mgr->lock);
        }

        if (pasid_mapping_needed) {
                mutex_lock(&id_mgr->lock);
                id->pasid = job->pasid;
                dma_fence_put(id->pasid_mapping);
                id->pasid_mapping = dma_fence_get(fence);
                mutex_unlock(&id_mgr->lock);
        }
        dma_fence_put(fence);

        amdgpu_ring_patch_cond_exec(ring, patch);

        /* the double SWITCH_BUFFER here *cannot* be skipped by COND_EXEC */
        if (ring->funcs->emit_switch_buffer) {
                amdgpu_ring_emit_switch_buffer(ring);
                amdgpu_ring_emit_switch_buffer(ring);
        }

        amdgpu_ring_ib_end(ring);
        return 0;
}

/**
 * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
 *
 * @vm: requested vm
 * @bo: requested buffer object
 *
 * Find @bo inside the requested vm.
 * Search inside the @bo's vm list for the requested vm.
 *
 * Object has to be reserved!
 *
 * Returns:
 * Found bo_va or NULL.
 */
struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
                                       struct amdgpu_bo *bo)
{
        struct amdgpu_vm_bo_base *base;

        for (base = bo->vm_bo; base; base = base->next) {
                if (base->vm != vm)
                        continue;

                return container_of(base, struct amdgpu_bo_va, base);
        }
        return NULL;
}

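/*
 * Illustrative sketch (not part of the driver): the BO must be reserved
 * while its vm_bo list is walked:
 *
 *	struct amdgpu_bo_va *bo_va = NULL;
 *
 *	if (amdgpu_bo_reserve(bo, true) == 0) {
 *		bo_va = amdgpu_vm_bo_find(vm, bo);
 *		amdgpu_bo_unreserve(bo);
 *	}
 */
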
/**
 * amdgpu_vm_map_gart - Resolve gart mapping of addr
 *
 * @pages_addr: optional DMA address to use for lookup
 * @addr: the unmapped addr
 *
 * Look up the physical address of the page that the pte resolves
 * to.
 *
 * Returns:
 * The pointer for the page table entry.
 */
uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
{
        uint64_t result;

        /* page table offset */
        result = pages_addr[addr >> PAGE_SHIFT];

        /* in case cpu page size != gpu page size */
        result |= addr & (~PAGE_MASK);

        result &= 0xFFFFFFFFFFFFF000ULL;

        return result;
}

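/*
 * Worked example (illustrative): with 64 KiB CPU pages and 4 KiB GPU pages,
 * addr = 0x15000 indexes pages_addr[0x1] (addr >> PAGE_SHIFT), the low bits
 * 0x5000 pick the 4 KiB GPU page inside that CPU page, and the final mask
 * drops the sub-4 KiB bits so the result is a properly aligned PTE address.
 */
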
/**
 * amdgpu_vm_update_pdes - make sure that all directories are valid
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @immediate: submit immediately to the paging queue
 *
 * Makes sure all directories are up to date.
 *
 * Returns:
 * 0 for success, error for failure.
 */
int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
                          struct amdgpu_vm *vm, bool immediate)
{
        struct amdgpu_vm_update_params params;
        struct amdgpu_vm_bo_base *entry;
        bool flush_tlb_needed = false;
        LIST_HEAD(relocated);
        int r, idx;

        spin_lock(&vm->status_lock);
        list_splice_init(&vm->relocated, &relocated);
        spin_unlock(&vm->status_lock);

        if (list_empty(&relocated))
                return 0;

        if (!drm_dev_enter(adev_to_drm(adev), &idx))
                return -ENODEV;

        memset(&params, 0, sizeof(params));
        params.adev = adev;
        params.vm = vm;
        params.immediate = immediate;

        r = vm->update_funcs->prepare(&params, NULL, AMDGPU_SYNC_EXPLICIT);
        if (r)
                goto error;

        list_for_each_entry(entry, &relocated, vm_status) {
                /* vm_flush_needed after updating moved PDEs */
                flush_tlb_needed |= entry->moved;

                r = amdgpu_vm_pde_update(&params, entry);
                if (r)
                        goto error;
        }

        r = vm->update_funcs->commit(&params, &vm->last_update);
        if (r)
                goto error;

        if (flush_tlb_needed)
                atomic64_inc(&vm->tlb_seq);

        while (!list_empty(&relocated)) {
                entry = list_first_entry(&relocated, struct amdgpu_vm_bo_base,
                                         vm_status);
                amdgpu_vm_bo_idle(entry);
        }

error:
        drm_dev_exit(idx);
        return r;
}

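/*
 * Illustrative sketch (not part of the driver): during command submission
 * the directories are brought up to date after the moved BOs were handled
 * (compare amdgpu_vm_handle_moved() below):
 *
 *	r = amdgpu_vm_handle_moved(adev, vm, NULL);
 *	if (!r)
 *		r = amdgpu_vm_update_pdes(adev, vm, false);
 */
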
/**
 * amdgpu_vm_tlb_seq_cb - make sure to increment tlb sequence
 * @fence: unused
 * @cb: the callback structure
 *
 * Increments the tlb sequence to make sure that future CS execute a VM flush.
 */
static void amdgpu_vm_tlb_seq_cb(struct dma_fence *fence,
                                 struct dma_fence_cb *cb)
{
        struct amdgpu_vm_tlb_seq_struct *tlb_cb;

        tlb_cb = container_of(cb, typeof(*tlb_cb), cb);
        atomic64_inc(&tlb_cb->vm->tlb_seq);
        kfree(tlb_cb);
}

/**
 * amdgpu_vm_tlb_flush - prepare TLB flush
 *
 * @params: parameters for update
 * @fence: input fence to sync TLB flush with
 * @tlb_cb: the callback structure
 *
 * Increments the tlb sequence to make sure that future CS execute a VM flush.
 */
static void
amdgpu_vm_tlb_flush(struct amdgpu_vm_update_params *params,
                    struct dma_fence **fence,
                    struct amdgpu_vm_tlb_seq_struct *tlb_cb)
{
        struct amdgpu_vm *vm = params->vm;

        if (!fence || !*fence)
                return;

        tlb_cb->vm = vm;
        if (!dma_fence_add_callback(*fence, &tlb_cb->cb,
                                    amdgpu_vm_tlb_seq_cb)) {
                dma_fence_put(vm->last_tlb_flush);
                vm->last_tlb_flush = dma_fence_get(*fence);
        } else {
                amdgpu_vm_tlb_seq_cb(NULL, &tlb_cb->cb);
        }

        /* Prepare a TLB flush fence to be attached to PTs */
        if (!params->unlocked && vm->is_compute_context) {
                amdgpu_vm_tlb_fence_create(params->adev, vm, fence);

                /* Makes sure no PD/PT is freed before the flush */
                dma_resv_add_fence(vm->root.bo->tbo.base.resv, *fence,
                                   DMA_RESV_USAGE_BOOKKEEP);
        }
}

/**
 * amdgpu_vm_update_range - update a range in the vm page table
 *
 * @adev: amdgpu_device pointer to use for commands
 * @vm: the VM to update the range
 * @immediate: immediate submission in a page fault
 * @unlocked: unlocked invalidation during MM callback
 * @flush_tlb: trigger tlb invalidation after update completed
 * @allow_override: change MTYPE for local NUMA nodes
 * @resv: fences we need to sync to
 * @start: start of mapped range
 * @last: last mapped entry
 * @flags: flags for the entries
 * @offset: offset into nodes and pages_addr
 * @vram_base: base for vram mappings
 * @res: ttm_resource to map
 * @pages_addr: DMA addresses to use for mapping
 * @fence: optional resulting fence
 *
 * Fill in the page table entries between @start and @last.
 *
 * Returns:
 * 0 for success, negative error code for failure.
 */
int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,
                           bool immediate, bool unlocked, bool flush_tlb, bool allow_override,
                           struct dma_resv *resv, uint64_t start, uint64_t last,
                           uint64_t flags, uint64_t offset, uint64_t vram_base,
                           struct ttm_resource *res, dma_addr_t *pages_addr,
                           struct dma_fence **fence)
{
        struct amdgpu_vm_tlb_seq_struct *tlb_cb;
        struct amdgpu_vm_update_params params;
        struct amdgpu_res_cursor cursor;
        enum amdgpu_sync_mode sync_mode;
        int r, idx;

        if (!drm_dev_enter(adev_to_drm(adev), &idx))
                return -ENODEV;

        tlb_cb = kmalloc(sizeof(*tlb_cb), GFP_KERNEL);
        if (!tlb_cb) {
                drm_dev_exit(idx);
                return -ENOMEM;
        }

        /* On Vega20+XGMI, PTEs get inadvertently cached in the L2 texture
         * cache; do a heavy-weight TLB flush unconditionally.
         */
        flush_tlb |= adev->gmc.xgmi.num_physical_nodes &&
                     amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 0);

        /*
         * On GFX8 and older any 8 PTE block with a valid bit set enters the TLB
         */
        flush_tlb |= amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(9, 0, 0);

        memset(&params, 0, sizeof(params));
        params.adev = adev;
        params.vm = vm;
        params.immediate = immediate;
        params.pages_addr = pages_addr;
        params.unlocked = unlocked;
        params.needs_flush = flush_tlb;
        params.allow_override = allow_override;
        INIT_LIST_HEAD(&params.tlb_flush_waitlist);

        /* Implicitly sync to command submissions in the same VM before
         * unmapping. Sync to moving fences before mapping.
         */
        if (!(flags & AMDGPU_PTE_VALID))
                sync_mode = AMDGPU_SYNC_EQ_OWNER;
        else
                sync_mode = AMDGPU_SYNC_EXPLICIT;

        amdgpu_vm_eviction_lock(vm);
        if (vm->evicting) {
                r = -EBUSY;
                goto error_free;
        }

        if (!unlocked && !dma_fence_is_signaled(vm->last_unlocked)) {
                struct dma_fence *tmp = dma_fence_get_stub();

                amdgpu_bo_fence(vm->root.bo, vm->last_unlocked, true);
                swap(vm->last_unlocked, tmp);
                dma_fence_put(tmp);
        }

        r = vm->update_funcs->prepare(&params, resv, sync_mode);
        if (r)
                goto error_free;

        amdgpu_res_first(pages_addr ? NULL : res, offset,
                         (last - start + 1) * AMDGPU_GPU_PAGE_SIZE, &cursor);
        while (cursor.remaining) {
                uint64_t tmp, num_entries, addr;

                num_entries = cursor.size >> AMDGPU_GPU_PAGE_SHIFT;
                if (pages_addr) {
                        bool contiguous = true;

                        if (num_entries > AMDGPU_GPU_PAGES_IN_CPU_PAGE) {
                                uint64_t pfn = cursor.start >> PAGE_SHIFT;
                                uint64_t count;

                                contiguous = pages_addr[pfn + 1] ==
                                        pages_addr[pfn] + PAGE_SIZE;

                                tmp = num_entries /
                                        AMDGPU_GPU_PAGES_IN_CPU_PAGE;
                                for (count = 2; count < tmp; ++count) {
                                        uint64_t idx = pfn + count;

                                        if (contiguous != (pages_addr[idx] ==
                                            pages_addr[idx - 1] + PAGE_SIZE))
                                                break;
                                }
                                if (!contiguous)
                                        count--;
                                num_entries = count *
                                        AMDGPU_GPU_PAGES_IN_CPU_PAGE;
                        }

                        if (!contiguous) {
                                addr = cursor.start;
                                params.pages_addr = pages_addr;
                        } else {
                                addr = pages_addr[cursor.start >> PAGE_SHIFT];
                                params.pages_addr = NULL;
                        }

                } else if (flags & (AMDGPU_PTE_VALID | AMDGPU_PTE_PRT_FLAG(adev))) {
                        addr = vram_base + cursor.start;
                } else {
                        addr = 0;
                }

                tmp = start + num_entries;
                r = amdgpu_vm_ptes_update(&params, start, tmp, addr, flags);
                if (r)
                        goto error_free;

                amdgpu_res_next(&cursor, num_entries * AMDGPU_GPU_PAGE_SIZE);
                start = tmp;
        }

        r = vm->update_funcs->commit(&params, fence);
        if (r)
                goto error_free;

        if (params.needs_flush) {
                amdgpu_vm_tlb_flush(&params, fence, tlb_cb);
                tlb_cb = NULL;
        }

        amdgpu_vm_pt_free_list(adev, &params);

error_free:
        kfree(tlb_cb);
        amdgpu_vm_eviction_unlock(vm);
        drm_dev_exit(idx);
        return r;
}

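/*
 * Illustrative sketch (not part of the driver): clearing a range of PTEs
 * with this helper, mirroring what amdgpu_vm_clear_freed() below does for
 * freed mappings:
 *
 *	r = amdgpu_vm_update_range(adev, vm, false, false, true, false,
 *				   vm->root.bo->tbo.base.resv, start, last,
 *				   0, 0, 0, NULL, NULL, &fence);
 */
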
static void amdgpu_vm_bo_get_memory(struct amdgpu_bo_va *bo_va,
                                    struct amdgpu_mem_stats *stats)
{
        struct amdgpu_vm *vm = bo_va->base.vm;
        struct amdgpu_bo *bo = bo_va->base.bo;

        if (!bo)
                return;

        /*
         * For now ignore BOs which are currently locked and potentially
         * changing their location.
         */
        if (!amdgpu_vm_is_bo_always_valid(vm, bo) &&
            !dma_resv_trylock(bo->tbo.base.resv))
                return;

        amdgpu_bo_get_memory(bo, stats);
        if (!amdgpu_vm_is_bo_always_valid(vm, bo))
                dma_resv_unlock(bo->tbo.base.resv);
}

void amdgpu_vm_get_memory(struct amdgpu_vm *vm,
                          struct amdgpu_mem_stats *stats)
{
        struct amdgpu_bo_va *bo_va, *tmp;

        spin_lock(&vm->status_lock);
        list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status)
                amdgpu_vm_bo_get_memory(bo_va, stats);

        list_for_each_entry_safe(bo_va, tmp, &vm->evicted, base.vm_status)
                amdgpu_vm_bo_get_memory(bo_va, stats);

        list_for_each_entry_safe(bo_va, tmp, &vm->relocated, base.vm_status)
                amdgpu_vm_bo_get_memory(bo_va, stats);

        list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status)
                amdgpu_vm_bo_get_memory(bo_va, stats);

        list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status)
                amdgpu_vm_bo_get_memory(bo_va, stats);

        list_for_each_entry_safe(bo_va, tmp, &vm->done, base.vm_status)
                amdgpu_vm_bo_get_memory(bo_va, stats);
        spin_unlock(&vm->status_lock);
}

/**
 * amdgpu_vm_bo_update - update all BO mappings in the vm page table
 *
 * @adev: amdgpu_device pointer
 * @bo_va: requested BO and VM object
 * @clear: if true clear the entries
 *
 * Fill in the page table entries for @bo_va.
 *
 * Returns:
 * 0 for success, -EINVAL for failure.
 */
int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
                        bool clear)
{
        struct amdgpu_bo *bo = bo_va->base.bo;
        struct amdgpu_vm *vm = bo_va->base.vm;
        struct amdgpu_bo_va_mapping *mapping;
        dma_addr_t *pages_addr = NULL;
        struct ttm_resource *mem;
        struct dma_fence **last_update;
        bool flush_tlb = clear;
        bool uncached;
        struct dma_resv *resv;
        uint64_t vram_base;
        uint64_t flags;
        int r;

        if (clear || !bo) {
                mem = NULL;
                resv = vm->root.bo->tbo.base.resv;
        } else {
                struct drm_gem_object *obj = &bo->tbo.base;

                resv = bo->tbo.base.resv;
                if (obj->import_attach && bo_va->is_xgmi) {
                        struct dma_buf *dma_buf = obj->import_attach->dmabuf;
                        struct drm_gem_object *gobj = dma_buf->priv;
                        struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);

                        if (abo->tbo.resource &&
                            abo->tbo.resource->mem_type == TTM_PL_VRAM)
                                bo = gem_to_amdgpu_bo(gobj);
                }
                mem = bo->tbo.resource;
                if (mem && (mem->mem_type == TTM_PL_TT ||
                            mem->mem_type == AMDGPU_PL_PREEMPT))
                        pages_addr = bo->tbo.ttm->dma_address;
        }

        if (bo) {
                struct amdgpu_device *bo_adev;

                flags = amdgpu_ttm_tt_pte_flags(adev, bo->tbo.ttm, mem);

                if (amdgpu_bo_encrypted(bo))
                        flags |= AMDGPU_PTE_TMZ;

                bo_adev = amdgpu_ttm_adev(bo->tbo.bdev);
                vram_base = bo_adev->vm_manager.vram_base_offset;
                uncached = (bo->flags & AMDGPU_GEM_CREATE_UNCACHED) != 0;
        } else {
                flags = 0x0;
                vram_base = 0;
                uncached = false;
        }

        if (clear || amdgpu_vm_is_bo_always_valid(vm, bo))
                last_update = &vm->last_update;
        else
                last_update = &bo_va->last_pt_update;

        if (!clear && bo_va->base.moved) {
                flush_tlb = true;
                list_splice_init(&bo_va->valids, &bo_va->invalids);

        } else if (bo_va->cleared != clear) {
                list_splice_init(&bo_va->valids, &bo_va->invalids);
        }

        list_for_each_entry(mapping, &bo_va->invalids, list) {
                uint64_t update_flags = flags;

                /* Normally bo_va->flags only contains the READABLE and
                 * WRITEABLE bits here, but we filter the flags first just in
                 * case.
                 */
                if (!(mapping->flags & AMDGPU_PTE_READABLE))
                        update_flags &= ~AMDGPU_PTE_READABLE;
                if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
                        update_flags &= ~AMDGPU_PTE_WRITEABLE;

                /* Apply ASIC specific mapping flags */
                amdgpu_gmc_get_vm_pte(adev, mapping, &update_flags);

                trace_amdgpu_vm_bo_update(mapping);

                r = amdgpu_vm_update_range(adev, vm, false, false, flush_tlb,
                                           !uncached, resv, mapping->start, mapping->last,
                                           update_flags, mapping->offset,
                                           vram_base, mem, pages_addr,
                                           last_update);
                if (r)
                        return r;
        }

        /* If the BO is not in its preferred location add it back to
         * the evicted list so that it gets validated again on the
         * next command submission.
         */
        if (amdgpu_vm_is_bo_always_valid(vm, bo)) {
                uint32_t mem_type = bo->tbo.resource->mem_type;

                if (!(bo->preferred_domains &
                      amdgpu_mem_type_to_domain(mem_type)))
                        amdgpu_vm_bo_evicted(&bo_va->base);
                else
                        amdgpu_vm_bo_idle(&bo_va->base);
        } else {
                amdgpu_vm_bo_done(&bo_va->base);
        }

        list_splice_init(&bo_va->invalids, &bo_va->valids);
        bo_va->cleared = clear;
        bo_va->base.moved = false;

        if (trace_amdgpu_vm_bo_mapping_enabled()) {
                list_for_each_entry(mapping, &bo_va->valids, list)
                        trace_amdgpu_vm_bo_mapping(mapping);
        }

        return 0;
}

/**
 * amdgpu_vm_update_prt_state - update the global PRT state
 *
 * @adev: amdgpu_device pointer
 */
static void amdgpu_vm_update_prt_state(struct amdgpu_device *adev)
{
        unsigned long flags;
        bool enable;

        spin_lock_irqsave(&adev->vm_manager.prt_lock, flags);
        enable = !!atomic_read(&adev->vm_manager.num_prt_users);
        adev->gmc.gmc_funcs->set_prt(adev, enable);
        spin_unlock_irqrestore(&adev->vm_manager.prt_lock, flags);
}

/**
 * amdgpu_vm_prt_get - add a PRT user
 *
 * @adev: amdgpu_device pointer
 */
static void amdgpu_vm_prt_get(struct amdgpu_device *adev)
{
        if (!adev->gmc.gmc_funcs->set_prt)
                return;

        if (atomic_inc_return(&adev->vm_manager.num_prt_users) == 1)
                amdgpu_vm_update_prt_state(adev);
}

/**
 * amdgpu_vm_prt_put - drop a PRT user
 *
 * @adev: amdgpu_device pointer
 */
static void amdgpu_vm_prt_put(struct amdgpu_device *adev)
{
        if (atomic_dec_return(&adev->vm_manager.num_prt_users) == 0)
                amdgpu_vm_update_prt_state(adev);
}

/**
 * amdgpu_vm_prt_cb - callback for updating the PRT status
 *
 * @fence: fence for the callback
 * @_cb: the callback structure
 */
static void amdgpu_vm_prt_cb(struct dma_fence *fence, struct dma_fence_cb *_cb)
{
        struct amdgpu_prt_cb *cb = container_of(_cb, struct amdgpu_prt_cb, cb);

        amdgpu_vm_prt_put(cb->adev);
        kfree(cb);
}

/**
 * amdgpu_vm_add_prt_cb - add callback for updating the PRT status
 *
 * @adev: amdgpu_device pointer
 * @fence: fence for the callback
 */
static void amdgpu_vm_add_prt_cb(struct amdgpu_device *adev,
                                 struct dma_fence *fence)
{
        struct amdgpu_prt_cb *cb;

        if (!adev->gmc.gmc_funcs->set_prt)
                return;

        cb = kmalloc(sizeof(struct amdgpu_prt_cb), GFP_KERNEL);
        if (!cb) {
                /* Last resort when we are OOM */
                if (fence)
                        dma_fence_wait(fence, false);

                amdgpu_vm_prt_put(adev);
        } else {
                cb->adev = adev;
                if (!fence || dma_fence_add_callback(fence, &cb->cb,
                                                     amdgpu_vm_prt_cb))
                        amdgpu_vm_prt_cb(fence, &cb->cb);
        }
}

/**
 * amdgpu_vm_free_mapping - free a mapping
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @mapping: mapping to be freed
 * @fence: fence of the unmap operation
 *
 * Free a mapping and make sure we decrease the PRT usage count if applicable.
 */
static void amdgpu_vm_free_mapping(struct amdgpu_device *adev,
                                   struct amdgpu_vm *vm,
                                   struct amdgpu_bo_va_mapping *mapping,
                                   struct dma_fence *fence)
{
        if (mapping->flags & AMDGPU_PTE_PRT_FLAG(adev))
                amdgpu_vm_add_prt_cb(adev, fence);
        kfree(mapping);
}

/**
 * amdgpu_vm_prt_fini - finish all prt mappings
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 *
 * Register a cleanup callback to disable PRT support after VM dies.
 */
static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
        struct dma_resv *resv = vm->root.bo->tbo.base.resv;
        struct dma_resv_iter cursor;
        struct dma_fence *fence;

        dma_resv_for_each_fence(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP, fence) {
                /* Add a callback for each fence in the reservation object */
                amdgpu_vm_prt_get(adev);
                amdgpu_vm_add_prt_cb(adev, fence);
        }
}

/**
 * amdgpu_vm_clear_freed - clear freed BOs in the PT
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @fence: optional resulting fence (unchanged if no work needed to be done
 * or if an error occurred)
 *
 * Make sure all freed BOs are cleared in the PT.
 * PTs have to be reserved and mutex must be locked!
 *
 * Returns:
 * 0 for success.
 */
int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
                          struct amdgpu_vm *vm,
                          struct dma_fence **fence)
{
        struct dma_resv *resv = vm->root.bo->tbo.base.resv;
        struct amdgpu_bo_va_mapping *mapping;
        uint64_t init_pte_value = 0;
        struct dma_fence *f = NULL;
        int r;

        while (!list_empty(&vm->freed)) {
                mapping = list_first_entry(&vm->freed,
                        struct amdgpu_bo_va_mapping, list);
                list_del(&mapping->list);

                r = amdgpu_vm_update_range(adev, vm, false, false, true, false,
                                           resv, mapping->start, mapping->last,
                                           init_pte_value, 0, 0, NULL, NULL,
                                           &f);
                amdgpu_vm_free_mapping(adev, vm, mapping, f);
                if (r) {
                        dma_fence_put(f);
                        return r;
                }
        }

        if (fence && f) {
                dma_fence_put(*fence);
                *fence = f;
        } else {
                dma_fence_put(f);
        }

        return 0;
}

1456 /**
1457  * amdgpu_vm_handle_moved - handle moved BOs in the PT
1458  *
1459  * @adev: amdgpu_device pointer
1460  * @vm: requested vm
1461  * @ticket: optional reservation ticket used to reserve the VM
1462  *
1463  * Make sure all BOs which are moved are updated in the PTs.
1464  *
1465  * Returns:
1466  * 0 for success.
1467  *
1468  * PTs have to be reserved!
1469  */
1470 int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
1471                            struct amdgpu_vm *vm,
1472                            struct ww_acquire_ctx *ticket)
1473 {
1474         struct amdgpu_bo_va *bo_va;
1475         struct dma_resv *resv;
1476         bool clear, unlock;
1477         int r;
1478
1479         spin_lock(&vm->status_lock);
1480         while (!list_empty(&vm->moved)) {
1481                 bo_va = list_first_entry(&vm->moved, struct amdgpu_bo_va,
1482                                          base.vm_status);
1483                 spin_unlock(&vm->status_lock);
1484
1485                 /* Per VM BOs never need to bo cleared in the page tables */
1486                 r = amdgpu_vm_bo_update(adev, bo_va, false);
1487                 if (r)
1488                         return r;
1489                 spin_lock(&vm->status_lock);
1490         }
1491
1492         while (!list_empty(&vm->invalidated)) {
1493                 bo_va = list_first_entry(&vm->invalidated, struct amdgpu_bo_va,
1494                                          base.vm_status);
1495                 resv = bo_va->base.bo->tbo.base.resv;
1496                 spin_unlock(&vm->status_lock);
1497
1498                 /* Try to reserve the BO to avoid clearing its ptes */
1499                 if (!adev->debug_vm && dma_resv_trylock(resv)) {
1500                         clear = false;
1501                         unlock = true;
1502                 /* The caller is already holding the reservation lock */
1503                 } else if (ticket && dma_resv_locking_ctx(resv) == ticket) {
1504                         clear = false;
1505                         unlock = false;
1506                 /* Somebody else is using the BO right now */
1507                 } else {
1508                         clear = true;
1509                         unlock = false;
1510                 }
1511
1512                 r = amdgpu_vm_bo_update(adev, bo_va, clear);
1513
1514                 if (unlock)
1515                         dma_resv_unlock(resv);
1516                 if (r)
1517                         return r;
1518
1519                 /* Remember evicted DMABuf imports in compute VMs for later
1520                  * validation
1521                  */
1522                 if (vm->is_compute_context &&
1523                     bo_va->base.bo->tbo.base.import_attach &&
1524                     (!bo_va->base.bo->tbo.resource ||
1525                      bo_va->base.bo->tbo.resource->mem_type == TTM_PL_SYSTEM))
1526                         amdgpu_vm_bo_evicted_user(&bo_va->base);
1527
1528                 spin_lock(&vm->status_lock);
1529         }
1530         spin_unlock(&vm->status_lock);
1531
1532         return 0;
1533 }
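
/*
 * Illustrative sketch only, not part of the driver: one way a caller
 * could satisfy the reservation rule above before calling
 * amdgpu_vm_handle_moved().  The helper name is hypothetical; the
 * drm_exec pattern mirrors what the CS and GEM paths do.
 */
static int __maybe_unused amdgpu_vm_example_revalidate(struct amdgpu_device *adev,
                                                       struct amdgpu_vm *vm)
{
        struct drm_exec exec;
        int r;

        drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
        drm_exec_until_all_locked(&exec) {
                /* Reserve the root PD and make room for two fences */
                r = amdgpu_vm_lock_pd(vm, &exec, 2);
                drm_exec_retry_on_contention(&exec);
                if (r)
                        goto out;
        }

        /* Pass the ticket so BOs we already hold aren't PTE-cleared */
        r = amdgpu_vm_handle_moved(adev, vm, &exec.ticket);
out:
        drm_exec_fini(&exec);
        return r;
}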
1534
1535 /**
1536  * amdgpu_vm_flush_compute_tlb - Flush TLB on compute VM
1537  *
1538  * @adev: amdgpu_device pointer
1539  * @vm: requested vm
1540  * @flush_type: flush type
1541  * @xcc_mask: mask of XCCs that belong to the compute partition in need of a TLB flush.
1542  *
1543  * Flush TLB if needed for a compute VM.
1544  *
1545  * Returns:
1546  * 0 for success.
1547  */
1548 int amdgpu_vm_flush_compute_tlb(struct amdgpu_device *adev,
1549                                 struct amdgpu_vm *vm,
1550                                 uint32_t flush_type,
1551                                 uint32_t xcc_mask)
1552 {
1553         uint64_t tlb_seq = amdgpu_vm_tlb_seq(vm);
1554         bool all_hub = false;
1555         int xcc = 0, r = 0;
1556
1557         WARN_ON_ONCE(!vm->is_compute_context);
1558
1559         /*
1560          * We may race and lose here, but that is extremely unlikely, and
1561          * the worst that can happen is that we flush the changes into the
1562          * TLB once more, which is harmless.
1563          */
1564         if (atomic64_xchg(&vm->kfd_last_flushed_seq, tlb_seq) == tlb_seq)
1565                 return 0;
1566
1567         if (adev->family == AMDGPU_FAMILY_AI ||
1568             adev->family == AMDGPU_FAMILY_RV)
1569                 all_hub = true;
1570
1571         for_each_inst(xcc, xcc_mask) {
1572                 r = amdgpu_gmc_flush_gpu_tlb_pasid(adev, vm->pasid, flush_type,
1573                                                    all_hub, xcc);
1574                 if (r)
1575                         break;
1576         }
1577         return r;
1578 }
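
/*
 * Sketch of a typical call; the helper is hypothetical and the use of
 * adev->gfx.xcc_mask assumes the whole device forms one compute
 * partition.  flush_type 0 requests a legacy (lightweight) flush.
 */
static int __maybe_unused amdgpu_vm_example_flush(struct amdgpu_device *adev,
                                                  struct amdgpu_vm *vm)
{
        return amdgpu_vm_flush_compute_tlb(adev, vm, 0, adev->gfx.xcc_mask);
}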
1579
1580 /**
1581  * amdgpu_vm_bo_add - add a bo to a specific vm
1582  *
1583  * @adev: amdgpu_device pointer
1584  * @vm: requested vm
1585  * @bo: amdgpu buffer object
1586  *
1587  * Add @bo into the requested vm.
1588  * Add @bo to the list of bos associated with the vm
1589  *
1590  * Returns:
1591  * Newly added bo_va or NULL for failure
1592  *
1593  * Object has to be reserved!
1594  */
1595 struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
1596                                       struct amdgpu_vm *vm,
1597                                       struct amdgpu_bo *bo)
1598 {
1599         struct amdgpu_bo_va *bo_va;
1600
1601         bo_va = kzalloc(sizeof(struct amdgpu_bo_va), GFP_KERNEL);
1602         if (!bo_va)
1603                 return NULL;
1604
1605         amdgpu_vm_bo_base_init(&bo_va->base, vm, bo);
1606
1607         bo_va->ref_count = 1;
1608         bo_va->last_pt_update = dma_fence_get_stub();
1609         INIT_LIST_HEAD(&bo_va->valids);
1610         INIT_LIST_HEAD(&bo_va->invalids);
1611
1612         if (!bo)
1613                 return bo_va;
1614
1615         dma_resv_assert_held(bo->tbo.base.resv);
1616         if (amdgpu_dmabuf_is_xgmi_accessible(adev, bo)) {
1617                 bo_va->is_xgmi = true;
1618                 /* Power up XGMI if it can be potentially used */
1619                 amdgpu_xgmi_set_pstate(adev, AMDGPU_XGMI_PSTATE_MAX_VEGA20);
1620         }
1621
1622         return bo_va;
1623 }
1624
1625
1626 /**
1627  * amdgpu_vm_bo_insert_map - insert a new mapping
1628  *
1629  * @adev: amdgpu_device pointer
1630  * @bo_va: bo_va to store the address
1631  * @mapping: the mapping to insert
1632  *
1633  * Insert a new mapping into all structures.
1634  */
1635 static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
1636                                     struct amdgpu_bo_va *bo_va,
1637                                     struct amdgpu_bo_va_mapping *mapping)
1638 {
1639         struct amdgpu_vm *vm = bo_va->base.vm;
1640         struct amdgpu_bo *bo = bo_va->base.bo;
1641
1642         mapping->bo_va = bo_va;
1643         list_add(&mapping->list, &bo_va->invalids);
1644         amdgpu_vm_it_insert(mapping, &vm->va);
1645
1646         if (mapping->flags & AMDGPU_PTE_PRT_FLAG(adev))
1647                 amdgpu_vm_prt_get(adev);
1648
1649         if (amdgpu_vm_is_bo_always_valid(vm, bo) && !bo_va->base.moved)
1650                 amdgpu_vm_bo_moved(&bo_va->base);
1651
1652         trace_amdgpu_vm_bo_map(bo_va, mapping);
1653 }
1654
1655 /* Validate operation parameters to prevent potential abuse */
1656 static int amdgpu_vm_verify_parameters(struct amdgpu_device *adev,
1657                                        struct amdgpu_bo *bo,
1658                                        uint64_t saddr,
1659                                        uint64_t offset,
1660                                        uint64_t size)
1661 {
1662         uint64_t tmp, lpfn;
1663
1664         if (saddr & AMDGPU_GPU_PAGE_MASK
1665             || offset & AMDGPU_GPU_PAGE_MASK
1666             || size & AMDGPU_GPU_PAGE_MASK)
1667                 return -EINVAL;
1668
1669         if (check_add_overflow(saddr, size, &tmp)
1670             || check_add_overflow(offset, size, &tmp)
1671             || size == 0 /* which also leads to end < begin */)
1672                 return -EINVAL;
1673
1674         /* make sure object fit at this offset */
1675         if (bo && offset + size > amdgpu_bo_size(bo))
1676                 return -EINVAL;
1677
1678         /* Ensure the last pfn does not exceed max_pfn */
1679         lpfn = (saddr + size - 1) >> AMDGPU_GPU_PAGE_SHIFT;
1680         if (lpfn >= adev->vm_manager.max_pfn)
1681                 return -EINVAL;
1682
1683         return 0;
1684 }
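
/*
 * Worked example for the checks above (GPU pages are 4KiB, so
 * AMDGPU_GPU_PAGE_MASK is 0xfff): saddr = 0x100000, offset = 0 and
 * size = 0x3000 pass, saddr = 0x100800 fails the alignment test, and
 * a size large enough for saddr + size to wrap past 2^64 trips
 * check_add_overflow().
 */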
1685
1686 /**
1687  * amdgpu_vm_bo_map - map bo inside a vm
1688  *
1689  * @adev: amdgpu_device pointer
1690  * @bo_va: bo_va to store the address
1691  * @saddr: where to map the BO
1692  * @offset: requested offset in the BO
1693  * @size: BO size in bytes
1694  * @flags: attributes of pages (read/write/valid/etc.)
1695  *
1696  * Add a mapping of the BO at the specified addr into the VM.
1697  *
1698  * Returns:
1699  * 0 for success, error for failure.
1700  *
1701  * Object has to be reserved and unreserved outside!
1702  */
1703 int amdgpu_vm_bo_map(struct amdgpu_device *adev,
1704                      struct amdgpu_bo_va *bo_va,
1705                      uint64_t saddr, uint64_t offset,
1706                      uint64_t size, uint64_t flags)
1707 {
1708         struct amdgpu_bo_va_mapping *mapping, *tmp;
1709         struct amdgpu_bo *bo = bo_va->base.bo;
1710         struct amdgpu_vm *vm = bo_va->base.vm;
1711         uint64_t eaddr;
1712         int r;
1713
1714         r = amdgpu_vm_verify_parameters(adev, bo, saddr, offset, size);
1715         if (r)
1716                 return r;
1717
1718         saddr /= AMDGPU_GPU_PAGE_SIZE;
1719         eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
1720
1721         tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
1722         if (tmp) {
1723                 /* bo and tmp overlap, invalid addr */
1724                 dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
1725                         "0x%010Lx-0x%010Lx\n", bo, saddr, eaddr,
1726                         tmp->start, tmp->last + 1);
1727                 return -EINVAL;
1728         }
1729
1730         mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
1731         if (!mapping)
1732                 return -ENOMEM;
1733
1734         mapping->start = saddr;
1735         mapping->last = eaddr;
1736         mapping->offset = offset;
1737         mapping->flags = flags;
1738
1739         amdgpu_vm_bo_insert_map(adev, bo_va, mapping);
1740
1741         return 0;
1742 }
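
/*
 * Minimal usage sketch, illustrative only: map a whole BO read/write at
 * @gpu_addr.  This assumes @bo is a per-VM BO sharing the root PD
 * reservation object, so reserving the BO also reserves the VM as
 * required above; the helper name is hypothetical.
 */
static int __maybe_unused amdgpu_vm_example_map(struct amdgpu_device *adev,
                                                struct amdgpu_vm *vm,
                                                struct amdgpu_bo *bo,
                                                uint64_t gpu_addr)
{
        struct amdgpu_bo_va *bo_va;
        int r;

        r = amdgpu_bo_reserve(bo, false);
        if (r)
                return r;

        bo_va = amdgpu_vm_bo_add(adev, vm, bo);
        if (!bo_va) {
                r = -ENOMEM;
                goto unreserve;
        }

        /* Plain read/write mapping, no special caching attributes */
        r = amdgpu_vm_bo_map(adev, bo_va, gpu_addr, 0, amdgpu_bo_size(bo),
                             AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE);
unreserve:
        amdgpu_bo_unreserve(bo);
        return r;
}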
1743
1744 /**
1745  * amdgpu_vm_bo_replace_map - map bo inside a vm, replacing existing mappings
1746  *
1747  * @adev: amdgpu_device pointer
1748  * @bo_va: bo_va to store the address
1749  * @saddr: where to map the BO
1750  * @offset: requested offset in the BO
1751  * @size: BO size in bytes
1752  * @flags: attributes of pages (read/write/valid/etc.)
1753  *
1754  * Add a mapping of the BO at the specified addr into the VM. Replace existing
1755  * mappings as we do so.
1756  *
1757  * Returns:
1758  * 0 for success, error for failure.
1759  *
1760  * Object has to be reserved and unreserved outside!
1761  */
1762 int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
1763                              struct amdgpu_bo_va *bo_va,
1764                              uint64_t saddr, uint64_t offset,
1765                              uint64_t size, uint64_t flags)
1766 {
1767         struct amdgpu_bo_va_mapping *mapping;
1768         struct amdgpu_bo *bo = bo_va->base.bo;
1769         uint64_t eaddr;
1770         int r;
1771
1772         r = amdgpu_vm_verify_parameters(adev, bo, saddr, offset, size);
1773         if (r)
1774                 return r;
1775
1776         /* Allocate all the needed memory */
1777         mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
1778         if (!mapping)
1779                 return -ENOMEM;
1780
1781         r = amdgpu_vm_bo_clear_mappings(adev, bo_va->base.vm, saddr, size);
1782         if (r) {
1783                 kfree(mapping);
1784                 return r;
1785         }
1786
1787         saddr /= AMDGPU_GPU_PAGE_SIZE;
1788         eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
1789
1790         mapping->start = saddr;
1791         mapping->last = eaddr;
1792         mapping->offset = offset;
1793         mapping->flags = flags;
1794
1795         amdgpu_vm_bo_insert_map(adev, bo_va, mapping);
1796
1797         return 0;
1798 }
1799
1800 /**
1801  * amdgpu_vm_bo_unmap - remove bo mapping from vm
1802  *
1803  * @adev: amdgpu_device pointer
1804  * @bo_va: bo_va to remove the address from
1805  * @saddr: where the BO is mapped
1806  *
1807  * Remove a mapping of the BO at the specified addr from the VM.
1808  *
1809  * Returns:
1810  * 0 for success, error for failure.
1811  *
1812  * Object has to be reserved and unreserved outside!
1813  */
1814 int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
1815                        struct amdgpu_bo_va *bo_va,
1816                        uint64_t saddr)
1817 {
1818         struct amdgpu_bo_va_mapping *mapping;
1819         struct amdgpu_vm *vm = bo_va->base.vm;
1820         bool valid = true;
1821
1822         saddr /= AMDGPU_GPU_PAGE_SIZE;
1823
1824         list_for_each_entry(mapping, &bo_va->valids, list) {
1825                 if (mapping->start == saddr)
1826                         break;
1827         }
1828
1829         if (&mapping->list == &bo_va->valids) {
1830                 valid = false;
1831
1832                 list_for_each_entry(mapping, &bo_va->invalids, list) {
1833                         if (mapping->start == saddr)
1834                                 break;
1835                 }
1836
1837                 if (&mapping->list == &bo_va->invalids)
1838                         return -ENOENT;
1839         }
1840
1841         list_del(&mapping->list);
1842         amdgpu_vm_it_remove(mapping, &vm->va);
1843         mapping->bo_va = NULL;
1844         trace_amdgpu_vm_bo_unmap(bo_va, mapping);
1845
1846         if (valid)
1847                 list_add(&mapping->list, &vm->freed);
1848         else
1849                 amdgpu_vm_free_mapping(adev, vm, mapping,
1850                                        bo_va->last_pt_update);
1851
1852         return 0;
1853 }
1854
1855 /**
1856  * amdgpu_vm_bo_clear_mappings - remove all mappings in a specific range
1857  *
1858  * @adev: amdgpu_device pointer
1859  * @vm: VM structure to use
1860  * @saddr: start of the range
1861  * @size: size of the range
1862  *
1863  * Remove all mappings in a range, split them as appropriate.
1864  *
1865  * Returns:
1866  * 0 for success, error for failure.
1867  */
1868 int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
1869                                 struct amdgpu_vm *vm,
1870                                 uint64_t saddr, uint64_t size)
1871 {
1872         struct amdgpu_bo_va_mapping *before, *after, *tmp, *next;
1873         LIST_HEAD(removed);
1874         uint64_t eaddr;
1875         int r;
1876
1877         r = amdgpu_vm_verify_parameters(adev, NULL, saddr, 0, size);
1878         if (r)
1879                 return r;
1880
1881         saddr /= AMDGPU_GPU_PAGE_SIZE;
1882         eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
1883
1884         /* Allocate all the needed memory */
1885         before = kzalloc(sizeof(*before), GFP_KERNEL);
1886         if (!before)
1887                 return -ENOMEM;
1888         INIT_LIST_HEAD(&before->list);
1889
1890         after = kzalloc(sizeof(*after), GFP_KERNEL);
1891         if (!after) {
1892                 kfree(before);
1893                 return -ENOMEM;
1894         }
1895         INIT_LIST_HEAD(&after->list);
1896
1897         /* Now gather all removed mappings */
1898         tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
1899         while (tmp) {
1900                 /* Remember mapping split at the start */
1901                 if (tmp->start < saddr) {
1902                         before->start = tmp->start;
1903                         before->last = saddr - 1;
1904                         before->offset = tmp->offset;
1905                         before->flags = tmp->flags;
1906                         before->bo_va = tmp->bo_va;
1907                         list_add(&before->list, &tmp->bo_va->invalids);
1908                 }
1909
1910                 /* Remember mapping split at the end */
1911                 if (tmp->last > eaddr) {
1912                         after->start = eaddr + 1;
1913                         after->last = tmp->last;
1914                         after->offset = tmp->offset;
1915                         after->offset += (after->start - tmp->start) << PAGE_SHIFT;
1916                         after->flags = tmp->flags;
1917                         after->bo_va = tmp->bo_va;
1918                         list_add(&after->list, &tmp->bo_va->invalids);
1919                 }
1920
1921                 list_del(&tmp->list);
1922                 list_add(&tmp->list, &removed);
1923
1924                 tmp = amdgpu_vm_it_iter_next(tmp, saddr, eaddr);
1925         }
1926
1927         /* And free them up */
1928         list_for_each_entry_safe(tmp, next, &removed, list) {
1929                 amdgpu_vm_it_remove(tmp, &vm->va);
1930                 list_del(&tmp->list);
1931
1932                 if (tmp->start < saddr)
1933                         tmp->start = saddr;
1934                 if (tmp->last > eaddr)
1935                         tmp->last = eaddr;
1936
1937                 tmp->bo_va = NULL;
1938                 list_add(&tmp->list, &vm->freed);
1939                 trace_amdgpu_vm_bo_unmap(NULL, tmp);
1940         }
1941
1942         /* Insert partial mapping before the range */
1943         if (!list_empty(&before->list)) {
1944                 struct amdgpu_bo *bo = before->bo_va->base.bo;
1945
1946                 amdgpu_vm_it_insert(before, &vm->va);
1947                 if (before->flags & AMDGPU_PTE_PRT_FLAG(adev))
1948                         amdgpu_vm_prt_get(adev);
1949
1950                 if (amdgpu_vm_is_bo_always_valid(vm, bo) &&
1951                     !before->bo_va->base.moved)
1952                         amdgpu_vm_bo_moved(&before->bo_va->base);
1953         } else {
1954                 kfree(before);
1955         }
1956
1957         /* Insert partial mapping after the range */
1958         if (!list_empty(&after->list)) {
1959                 struct amdgpu_bo *bo = after->bo_va->base.bo;
1960
1961                 amdgpu_vm_it_insert(after, &vm->va);
1962                 if (after->flags & AMDGPU_PTE_PRT_FLAG(adev))
1963                         amdgpu_vm_prt_get(adev);
1964
1965                 if (amdgpu_vm_is_bo_always_valid(vm, bo) &&
1966                     !after->bo_va->base.moved)
1967                         amdgpu_vm_bo_moved(&after->bo_va->base);
1968         } else {
1969                 kfree(after);
1970         }
1971
1972         return 0;
1973 }
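
/*
 * Worked example (addresses in GPU pages, i.e. after the division
 * above): an existing mapping covers [0x100, 0x1ff] and the caller
 * clears [0x140, 0x17f].  "before" keeps [0x100, 0x13f] with the
 * original offset, "after" keeps [0x180, 0x1ff] with the offset
 * advanced by 0x80 pages worth of bytes, and the middle [0x140, 0x17f]
 * lands on vm->freed for the next amdgpu_vm_clear_freed() pass.
 */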
1974
1975 /**
1976  * amdgpu_vm_bo_lookup_mapping - find mapping by address
1977  *
1978  * @vm: the requested VM
1979  * @addr: the address
1980  *
1981  * Find a mapping by its address.
1982  *
1983  * Returns:
1984  * The amdgpu_bo_va_mapping matching for addr or NULL
1985  *
1986  */
1987 struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
1988                                                          uint64_t addr)
1989 {
1990         return amdgpu_vm_it_iter_first(&vm->va, addr, addr);
1991 }
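
/*
 * Sketch of a hypothetical helper: resolve the BO backing a GPU
 * virtual address, e.g. from fault handling code.  The interval tree
 * is indexed in GPU pages, so byte addresses must be divided first.
 */
static __maybe_unused struct amdgpu_bo *
amdgpu_vm_example_bo_for_va(struct amdgpu_vm *vm, uint64_t va)
{
        struct amdgpu_bo_va_mapping *mapping;

        mapping = amdgpu_vm_bo_lookup_mapping(vm, va / AMDGPU_GPU_PAGE_SIZE);
        if (!mapping || !mapping->bo_va || !mapping->bo_va->base.bo)
                return NULL;

        return mapping->bo_va->base.bo;
}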
1992
1993 /**
1994  * amdgpu_vm_bo_trace_cs - trace all reserved mappings
1995  *
1996  * @vm: the requested vm
1997  * @ticket: CS ticket
1998  *
1999  * Trace all mappings of BOs reserved during a command submission.
2000  */
2001 void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket)
2002 {
2003         struct amdgpu_bo_va_mapping *mapping;
2004
2005         if (!trace_amdgpu_vm_bo_cs_enabled())
2006                 return;
2007
2008         for (mapping = amdgpu_vm_it_iter_first(&vm->va, 0, U64_MAX); mapping;
2009              mapping = amdgpu_vm_it_iter_next(mapping, 0, U64_MAX)) {
2010                 if (mapping->bo_va && mapping->bo_va->base.bo) {
2011                         struct amdgpu_bo *bo;
2012
2013                         bo = mapping->bo_va->base.bo;
2014                         if (dma_resv_locking_ctx(bo->tbo.base.resv) !=
2015                             ticket)
2016                                 continue;
2017                 }
2018
2019                 trace_amdgpu_vm_bo_cs(mapping);
2020         }
2021 }
2022
2023 /**
2024  * amdgpu_vm_bo_del - remove a bo from a specific vm
2025  *
2026  * @adev: amdgpu_device pointer
2027  * @bo_va: requested bo_va
2028  *
2029  * Remove @bo_va->bo from the requested vm.
2030  *
2031  * Object has to be reserved!
2032  */
2033 void amdgpu_vm_bo_del(struct amdgpu_device *adev,
2034                       struct amdgpu_bo_va *bo_va)
2035 {
2036         struct amdgpu_bo_va_mapping *mapping, *next;
2037         struct amdgpu_bo *bo = bo_va->base.bo;
2038         struct amdgpu_vm *vm = bo_va->base.vm;
2039         struct amdgpu_vm_bo_base **base;
2040
2041         dma_resv_assert_held(vm->root.bo->tbo.base.resv);
2042
2043         if (bo) {
2044                 dma_resv_assert_held(bo->tbo.base.resv);
2045                 if (amdgpu_vm_is_bo_always_valid(vm, bo))
2046                         ttm_bo_set_bulk_move(&bo->tbo, NULL);
2047
2048                 for (base = &bo_va->base.bo->vm_bo; *base;
2049                      base = &(*base)->next) {
2050                         if (*base != &bo_va->base)
2051                                 continue;
2052
2053                         *base = bo_va->base.next;
2054                         break;
2055                 }
2056         }
2057
2058         spin_lock(&vm->status_lock);
2059         list_del(&bo_va->base.vm_status);
2060         spin_unlock(&vm->status_lock);
2061
2062         list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
2063                 list_del(&mapping->list);
2064                 amdgpu_vm_it_remove(mapping, &vm->va);
2065                 mapping->bo_va = NULL;
2066                 trace_amdgpu_vm_bo_unmap(bo_va, mapping);
2067                 list_add(&mapping->list, &vm->freed);
2068         }
2069         list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
2070                 list_del(&mapping->list);
2071                 amdgpu_vm_it_remove(mapping, &vm->va);
2072                 amdgpu_vm_free_mapping(adev, vm, mapping,
2073                                        bo_va->last_pt_update);
2074         }
2075
2076         dma_fence_put(bo_va->last_pt_update);
2077
2078         if (bo && bo_va->is_xgmi)
2079                 amdgpu_xgmi_set_pstate(adev, AMDGPU_XGMI_PSTATE_MIN);
2080
2081         kfree(bo_va);
2082 }
2083
2084 /**
2085  * amdgpu_vm_evictable - check if we can evict a VM
2086  *
2087  * @bo: A page table of the VM.
2088  *
2089  * Check if it is possible to evict a VM.
2090  */
2091 bool amdgpu_vm_evictable(struct amdgpu_bo *bo)
2092 {
2093         struct amdgpu_vm_bo_base *bo_base = bo->vm_bo;
2094
2095         /* Page tables of a destroyed VM can go away immediately */
2096         if (!bo_base || !bo_base->vm)
2097                 return true;
2098
2099         /* Don't evict VM page tables while they are busy */
2100         if (!dma_resv_test_signaled(bo->tbo.base.resv, DMA_RESV_USAGE_BOOKKEEP))
2101                 return false;
2102
2103         /* Try to block ongoing updates */
2104         if (!amdgpu_vm_eviction_trylock(bo_base->vm))
2105                 return false;
2106
2107         /* Don't evict VM page tables while they are updated */
2108         if (!dma_fence_is_signaled(bo_base->vm->last_unlocked)) {
2109                 amdgpu_vm_eviction_unlock(bo_base->vm);
2110                 return false;
2111         }
2112
2113         bo_base->vm->evicting = true;
2114         amdgpu_vm_eviction_unlock(bo_base->vm);
2115         return true;
2116 }
2117
2118 /**
2119  * amdgpu_vm_bo_invalidate - mark the bo as invalid
2120  *
2121  * @adev: amdgpu_device pointer
2122  * @bo: amdgpu buffer object
2123  * @evicted: is the BO evicted
2124  *
2125  * Mark @bo as invalid.
2126  */
2127 void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
2128                              struct amdgpu_bo *bo, bool evicted)
2129 {
2130         struct amdgpu_vm_bo_base *bo_base;
2131
2132         /* shadow bo doesn't have bo base, its validation needs its parent */
2133         if (bo->parent && (amdgpu_bo_shadowed(bo->parent) == bo))
2134                 bo = bo->parent;
2135
2136         for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) {
2137                 struct amdgpu_vm *vm = bo_base->vm;
2138
2139                 if (evicted && amdgpu_vm_is_bo_always_valid(vm, bo)) {
2140                         amdgpu_vm_bo_evicted(bo_base);
2141                         continue;
2142                 }
2143
2144                 if (bo_base->moved)
2145                         continue;
2146                 bo_base->moved = true;
2147
2148                 if (bo->tbo.type == ttm_bo_type_kernel)
2149                         amdgpu_vm_bo_relocated(bo_base);
2150                 else if (amdgpu_vm_is_bo_always_valid(vm, bo))
2151                         amdgpu_vm_bo_moved(bo_base);
2152                 else
2153                         amdgpu_vm_bo_invalidated(bo_base);
2154         }
2155 }
2156
2157 /**
2158  * amdgpu_vm_get_block_size - calculate VM page table size as power of two
2159  *
2160  * @vm_size: VM size
2161  *
2162  * Returns:
2163  * VM page table as power of two
2164  */
2165 static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size)
2166 {
2167         /* Total bits covered by PD + PTs */
2168         unsigned bits = ilog2(vm_size) + 18;
2169
2170         /* Make sure the PD is 4K in size up to 8GB of address space.
2171          * Above that, split equally between PD and PTs. */
2172         if (vm_size <= 8)
2173                 return (bits - 9);
2174         else
2175                 return ((bits + 3) / 2);
2176 }
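
/*
 * Example: vm_size = 8 GB gives bits = 21, so the PD keeps 9 bits
 * (4K, 512 entries) and the block size is 21 - 9 = 12.  vm_size =
 * 256 GB gives bits = 26, split as (26 + 3) / 2 = 14.
 */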
2177
2178 /**
2179  * amdgpu_vm_adjust_size - adjust vm size, block size and fragment size
2180  *
2181  * @adev: amdgpu_device pointer
2182  * @min_vm_size: the minimum vm size in GB if it's set auto
2183  * @fragment_size_default: Default PTE fragment size
2184  * @max_level: max VMPT level
2185  * @max_bits: max address space size in bits
2186  *
2187  */
2188 void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
2189                            uint32_t fragment_size_default, unsigned max_level,
2190                            unsigned max_bits)
2191 {
2192         unsigned int max_size = 1 << (max_bits - 30);
2193         unsigned int vm_size;
2194         uint64_t tmp;
2195
2196         /* adjust vm size first */
2197         if (amdgpu_vm_size != -1) {
2198                 vm_size = amdgpu_vm_size;
2199                 if (vm_size > max_size) {
2200                         dev_warn(adev->dev, "VM size (%d) too large, max is %u GB\n",
2201                                  amdgpu_vm_size, max_size);
2202                         vm_size = max_size;
2203                 }
2204         } else {
2205                 struct sysinfo si;
2206                 unsigned int phys_ram_gb;
2207
2208                 /* Optimal VM size depends on the amount of physical
2209                  * RAM available. Underlying requirements and
2210                  * assumptions:
2211                  *
2212                  *  - Need to map system memory and VRAM from all GPUs
2213                  *     - VRAM from other GPUs not known here
2214                  *     - Assume VRAM <= system memory
2215                  *  - On GFX8 and older, VM space can be segmented for
2216                  *    different MTYPEs
2217                  *  - Need to allow room for fragmentation, guard pages etc.
2218                  *
2219                  * This adds up to a rough guess of system memory x3.
2220                  * Round up to power of two to maximize the available
2221                  * VM size with the given page table size.
2222                  */
2223                 si_meminfo(&si);
2224                 phys_ram_gb = ((uint64_t)si.totalram * si.mem_unit +
2225                                (1 << 30) - 1) >> 30;
2226                 vm_size = roundup_pow_of_two(
2227                         min(max(phys_ram_gb * 3, min_vm_size), max_size));
2228         }
2229
2230         adev->vm_manager.max_pfn = (uint64_t)vm_size << 18;
2231
2232         tmp = roundup_pow_of_two(adev->vm_manager.max_pfn);
2233         if (amdgpu_vm_block_size != -1)
2234                 tmp >>= amdgpu_vm_block_size - 9;
2235         tmp = DIV_ROUND_UP(fls64(tmp) - 1, 9) - 1;
2236         adev->vm_manager.num_level = min_t(unsigned int, max_level, tmp);
2237         switch (adev->vm_manager.num_level) {
2238         case 3:
2239                 adev->vm_manager.root_level = AMDGPU_VM_PDB2;
2240                 break;
2241         case 2:
2242                 adev->vm_manager.root_level = AMDGPU_VM_PDB1;
2243                 break;
2244         case 1:
2245                 adev->vm_manager.root_level = AMDGPU_VM_PDB0;
2246                 break;
2247         default:
2248                 dev_err(adev->dev, "VMPT only supports 2~4+1 levels\n");
2249         }
2250         /* block size depends on vm size and hw setup */
2251         if (amdgpu_vm_block_size != -1)
2252                 adev->vm_manager.block_size =
2253                         min((unsigned)amdgpu_vm_block_size, max_bits
2254                             - AMDGPU_GPU_PAGE_SHIFT
2255                             - 9 * adev->vm_manager.num_level);
2256         else if (adev->vm_manager.num_level > 1)
2257                 adev->vm_manager.block_size = 9;
2258         else
2259                 adev->vm_manager.block_size = amdgpu_vm_get_block_size(tmp);
2260
2261         if (amdgpu_vm_fragment_size == -1)
2262                 adev->vm_manager.fragment_size = fragment_size_default;
2263         else
2264                 adev->vm_manager.fragment_size = amdgpu_vm_fragment_size;
2265
2266         DRM_INFO("vm size is %u GB, %u levels, block size is %u-bit, fragment size is %u-bit\n",
2267                  vm_size, adev->vm_manager.num_level + 1,
2268                  adev->vm_manager.block_size,
2269                  adev->vm_manager.fragment_size);
2270 }
2271
2272 /**
2273  * amdgpu_vm_wait_idle - wait for the VM to become idle
2274  *
2275  * @vm: VM object to wait for
2276  * @timeout: timeout to wait for VM to become idle
2277  */
2278 long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
2279 {
2280         timeout = dma_resv_wait_timeout(vm->root.bo->tbo.base.resv,
2281                                         DMA_RESV_USAGE_BOOKKEEP,
2282                                         true, timeout);
2283         if (timeout <= 0)
2284                 return timeout;
2285
2286         return dma_fence_wait_timeout(vm->last_unlocked, true, timeout);
2287 }
2288
2289 static void amdgpu_vm_destroy_task_info(struct kref *kref)
2290 {
2291         struct amdgpu_task_info *ti = container_of(kref, struct amdgpu_task_info, refcount);
2292
2293         kfree(ti);
2294 }
2295
2296 static inline struct amdgpu_vm *
2297 amdgpu_vm_get_vm_from_pasid(struct amdgpu_device *adev, u32 pasid)
2298 {
2299         struct amdgpu_vm *vm;
2300         unsigned long flags;
2301
2302         xa_lock_irqsave(&adev->vm_manager.pasids, flags);
2303         vm = xa_load(&adev->vm_manager.pasids, pasid);
2304         xa_unlock_irqrestore(&adev->vm_manager.pasids, flags);
2305
2306         return vm;
2307 }
2308
2309 /**
2310  * amdgpu_vm_put_task_info - reference down the vm task_info ptr
2311  *
2312  * @task_info: task_info struct under discussion.
2313  *
2314  * Frees the vm task_info ptr at the last put.
2315  */
2316 void amdgpu_vm_put_task_info(struct amdgpu_task_info *task_info)
2317 {
2318         if (task_info)
                     kref_put(&task_info->refcount, amdgpu_vm_destroy_task_info);
2319 }
2320
2321 /**
2322  * amdgpu_vm_get_task_info_vm - Extracts task info for a vm.
2323  *
2324  * @vm: VM to get info from
2325  *
2326  * Returns the reference counted task_info structure, which must be
2327  * referenced down with amdgpu_vm_put_task_info.
2328  */
2329 struct amdgpu_task_info *
2330 amdgpu_vm_get_task_info_vm(struct amdgpu_vm *vm)
2331 {
2332         struct amdgpu_task_info *ti = NULL;
2333
2334         if (vm && vm->task_info) {
2335                 ti = vm->task_info;
2336                 kref_get(&ti->refcount);
2337         }
2338
2339         return ti;
2340 }
2341
2342 /**
2343  * amdgpu_vm_get_task_info_pasid - Extracts task info for a PASID.
2344  *
2345  * @adev: drm device pointer
2346  * @pasid: PASID identifier for VM
2347  *
2348  * Returns the reference counted task_info structure, which must be
2349  * referenced down with amdgpu_vm_put_task_info.
2350  */
2351 struct amdgpu_task_info *
2352 amdgpu_vm_get_task_info_pasid(struct amdgpu_device *adev, u32 pasid)
2353 {
2354         return amdgpu_vm_get_task_info_vm(
2355                         amdgpu_vm_get_vm_from_pasid(adev, pasid));
2356 }
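
/*
 * Usage sketch with a hypothetical helper: every successful get must
 * be balanced with amdgpu_vm_put_task_info(), e.g. when logging which
 * process is behind a PASID.
 */
static void __maybe_unused amdgpu_vm_example_log_task(struct amdgpu_device *adev,
                                                      u32 pasid)
{
        struct amdgpu_task_info *ti;

        ti = amdgpu_vm_get_task_info_pasid(adev, pasid);
        if (!ti)
                return;

        dev_info(adev->dev, "pasid %u belongs to %s (pid %d)\n",
                 pasid, ti->process_name, ti->pid);
        amdgpu_vm_put_task_info(ti);
}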
2357
2358 static int amdgpu_vm_create_task_info(struct amdgpu_vm *vm)
2359 {
2360         vm->task_info = kzalloc(sizeof(struct amdgpu_task_info), GFP_KERNEL);
2361         if (!vm->task_info)
2362                 return -ENOMEM;
2363
2364         kref_init(&vm->task_info->refcount);
2365         return 0;
2366 }
2367
2368 /**
2369  * amdgpu_vm_set_task_info - Sets VMs task info.
2370  *
2371  * @vm: vm for which to set the info
2372  */
2373 void amdgpu_vm_set_task_info(struct amdgpu_vm *vm)
2374 {
2375         if (!vm->task_info)
2376                 return;
2377
2378         if (vm->task_info->pid == current->pid)
2379                 return;
2380
2381         vm->task_info->pid = current->pid;
2382         get_task_comm(vm->task_info->task_name, current);
2383
2384         if (current->group_leader->mm != current->mm)
2385                 return;
2386
2387         vm->task_info->tgid = current->group_leader->pid;
2388         get_task_comm(vm->task_info->process_name, current->group_leader);
2389 }
2390
2391 /**
2392  * amdgpu_vm_init - initialize a vm instance
2393  *
2394  * @adev: amdgpu_device pointer
2395  * @vm: requested vm
2396  * @xcp_id: GPU partition selection id
2397  *
2398  * Init @vm fields.
2399  *
2400  * Returns:
2401  * 0 for success, error for failure.
2402  */
2403 int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
2404                    int32_t xcp_id)
2405 {
2406         struct amdgpu_ip_block *ip_block;
2407         struct amdgpu_bo *root_bo;
2408         struct amdgpu_bo_vm *root;
2409         int r, i;
2410
2411         vm->va = RB_ROOT_CACHED;
2412         for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
2413                 vm->reserved_vmid[i] = false;
2414         INIT_LIST_HEAD(&vm->evicted);
2415         INIT_LIST_HEAD(&vm->evicted_user);
2416         INIT_LIST_HEAD(&vm->relocated);
2417         INIT_LIST_HEAD(&vm->moved);
2418         INIT_LIST_HEAD(&vm->idle);
2419         INIT_LIST_HEAD(&vm->invalidated);
2420         spin_lock_init(&vm->status_lock);
2421         INIT_LIST_HEAD(&vm->freed);
2422         INIT_LIST_HEAD(&vm->done);
2423         INIT_LIST_HEAD(&vm->pt_freed);
2424         INIT_WORK(&vm->pt_free_work, amdgpu_vm_pt_free_work);
2425         INIT_KFIFO(vm->faults);
2426
2427         r = amdgpu_vm_init_entities(adev, vm);
2428         if (r)
2429                 return r;
2430
2431         ttm_lru_bulk_move_init(&vm->lru_bulk_move);
2432
2433         vm->is_compute_context = false;
2434
2435         vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
2436                                     AMDGPU_VM_USE_CPU_FOR_GFX);
2437
2438         /* use CPU for page table update if SDMA is unavailable */
2439         ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_SDMA);
2440         if (!ip_block || !ip_block->status.valid)
2441                 vm->use_cpu_for_update = true;
2442
2443         DRM_DEBUG_DRIVER("VM update mode is %s\n",
2444                          vm->use_cpu_for_update ? "CPU" : "SDMA");
2445         WARN_ONCE((vm->use_cpu_for_update &&
2446                    !amdgpu_gmc_vram_full_visible(&adev->gmc)),
2447                   "CPU update of VM recommended only for large BAR system\n");
2448
2449         if (vm->use_cpu_for_update)
2450                 vm->update_funcs = &amdgpu_vm_cpu_funcs;
2451         else
2452                 vm->update_funcs = &amdgpu_vm_sdma_funcs;
2453
2454         vm->last_update = dma_fence_get_stub();
2455         vm->last_unlocked = dma_fence_get_stub();
2456         vm->last_tlb_flush = dma_fence_get_stub();
2457         vm->generation = amdgpu_vm_generation(adev, NULL);
2458
2459         mutex_init(&vm->eviction_lock);
2460         vm->evicting = false;
2461         vm->tlb_fence_context = dma_fence_context_alloc(1);
2462
2463         r = amdgpu_vm_pt_create(adev, vm, adev->vm_manager.root_level,
2464                                 false, &root, xcp_id);
2465         if (r)
2466                 goto error_free_delayed;
2467
2468         root_bo = amdgpu_bo_ref(&root->bo);
2469         r = amdgpu_bo_reserve(root_bo, true);
2470         if (r) {
2471                 amdgpu_bo_unref(&root->shadow);
2472                 amdgpu_bo_unref(&root_bo);
2473                 goto error_free_delayed;
2474         }
2475
2476         amdgpu_vm_bo_base_init(&vm->root, vm, root_bo);
2477         r = dma_resv_reserve_fences(root_bo->tbo.base.resv, 1);
2478         if (r)
2479                 goto error_free_root;
2480
2481         r = amdgpu_vm_pt_clear(adev, vm, root, false);
2482         if (r)
2483                 goto error_free_root;
2484
2485         r = amdgpu_vm_create_task_info(vm);
2486         if (r)
2487                 DRM_DEBUG("Failed to create task info for VM\n");
2488
2489         amdgpu_bo_unreserve(vm->root.bo);
2490         amdgpu_bo_unref(&root_bo);
2491
2492         return 0;
2493
2494 error_free_root:
2495         amdgpu_vm_pt_free_root(adev, vm);
2496         amdgpu_bo_unreserve(vm->root.bo);
2497         amdgpu_bo_unref(&root_bo);
2498
2499 error_free_delayed:
2500         dma_fence_put(vm->last_tlb_flush);
2501         dma_fence_put(vm->last_unlocked);
2502         ttm_lru_bulk_move_fini(&adev->mman.bdev, &vm->lru_bulk_move);
2503         amdgpu_vm_fini_entities(vm);
2504
2505         return r;
2506 }
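
/*
 * Lifecycle sketch, illustrative only: pairing amdgpu_vm_init() with
 * amdgpu_vm_fini().  Passing xcp_id 0 assumes a single-partition
 * device; real callers pass the partition id of the opening client.
 */
static int __maybe_unused amdgpu_vm_example_lifecycle(struct amdgpu_device *adev)
{
        struct amdgpu_vm *vm;
        int r;

        vm = kzalloc(sizeof(*vm), GFP_KERNEL);
        if (!vm)
                return -ENOMEM;

        r = amdgpu_vm_init(adev, vm, 0);
        if (!r)
                amdgpu_vm_fini(adev, vm);

        kfree(vm);
        return r;
}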
2507
2508 /**
2509  * amdgpu_vm_make_compute - Turn a GFX VM into a compute VM
2510  *
2511  * @adev: amdgpu_device pointer
2512  * @vm: requested vm
2513  *
2514  * This only works on GFX VMs that don't have any BOs added and no
2515  * page tables allocated yet.
2516  *
2517  * Changes the following VM parameters:
2518  * - use_cpu_for_update
2519  * - update_funcs
2520  *
2521  * Also marks the VM as a compute context and frees the shadow BO of
2522  * the root PD, which compute VMs don't need.
2523  *
2524  * Returns:
2525  * 0 for success, -errno for errors.
2526  */
2527 int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
2528 {
2529         int r;
2530
2531         r = amdgpu_bo_reserve(vm->root.bo, true);
2532         if (r)
2533                 return r;
2534
2535         /* Update VM state */
2536         vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
2537                                     AMDGPU_VM_USE_CPU_FOR_COMPUTE);
2538         DRM_DEBUG_DRIVER("VM update mode is %s\n",
2539                          vm->use_cpu_for_update ? "CPU" : "SDMA");
2540         WARN_ONCE((vm->use_cpu_for_update &&
2541                    !amdgpu_gmc_vram_full_visible(&adev->gmc)),
2542                   "CPU update of VM recommended only for large BAR system\n");
2543
2544         if (vm->use_cpu_for_update) {
2545                 /* Sync with last SDMA update/clear before switching to CPU */
2546                 r = amdgpu_bo_sync_wait(vm->root.bo,
2547                                         AMDGPU_FENCE_OWNER_UNDEFINED, true);
2548                 if (r)
2549                         goto unreserve_bo;
2550
2551                 vm->update_funcs = &amdgpu_vm_cpu_funcs;
2552                 r = amdgpu_vm_pt_map_tables(adev, vm);
2553                 if (r)
2554                         goto unreserve_bo;
2555
2556         } else {
2557                 vm->update_funcs = &amdgpu_vm_sdma_funcs;
2558         }
2559
2560         dma_fence_put(vm->last_update);
2561         vm->last_update = dma_fence_get_stub();
2562         vm->is_compute_context = true;
2563
2564         /* Free the shadow bo for compute VM */
2565         amdgpu_bo_unref(&to_amdgpu_bo_vm(vm->root.bo)->shadow);
2566
2569 unreserve_bo:
2570         amdgpu_bo_unreserve(vm->root.bo);
2571         return r;
2572 }
2573
2574 /**
2575  * amdgpu_vm_release_compute - release a compute vm
2576  * @adev: amdgpu_device pointer
2577  * @vm: a vm turned into compute vm by calling amdgpu_vm_make_compute
2578  *
2579  * This is the counterpart of amdgpu_vm_make_compute. It decouples the
2580  * compute pasid from the vm. Compute should stop using the vm after this call.
2581  */
2582 void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
2583 {
2584         amdgpu_vm_set_pasid(adev, vm, 0);
2585         vm->is_compute_context = false;
2586 }
2587
2588 /**
2589  * amdgpu_vm_fini - tear down a vm instance
2590  *
2591  * @adev: amdgpu_device pointer
2592  * @vm: requested vm
2593  *
2594  * Tear down @vm.
2595  * Unbind the VM and remove all bos from the vm bo list
2596  */
2597 void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
2598 {
2599         struct amdgpu_bo_va_mapping *mapping, *tmp;
2600         bool prt_fini_needed = !!adev->gmc.gmc_funcs->set_prt;
2601         struct amdgpu_bo *root;
2602         unsigned long flags;
2603         int i;
2604
2605         amdgpu_amdkfd_gpuvm_destroy_cb(adev, vm);
2606
2607         flush_work(&vm->pt_free_work);
2608
2609         root = amdgpu_bo_ref(vm->root.bo);
2610         amdgpu_bo_reserve(root, true);
2611         amdgpu_vm_put_task_info(vm->task_info);
2612         amdgpu_vm_set_pasid(adev, vm, 0);
2613         dma_fence_wait(vm->last_unlocked, false);
2614         dma_fence_put(vm->last_unlocked);
2615         dma_fence_wait(vm->last_tlb_flush, false);
2616         /* Make sure that all fence callbacks have completed */
2617         spin_lock_irqsave(vm->last_tlb_flush->lock, flags);
2618         spin_unlock_irqrestore(vm->last_tlb_flush->lock, flags);
2619         dma_fence_put(vm->last_tlb_flush);
2620
2621         list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
2622                 if (mapping->flags & AMDGPU_PTE_PRT_FLAG(adev) && prt_fini_needed) {
2623                         amdgpu_vm_prt_fini(adev, vm);
2624                         prt_fini_needed = false;
2625                 }
2626
2627                 list_del(&mapping->list);
2628                 amdgpu_vm_free_mapping(adev, vm, mapping, NULL);
2629         }
2630
2631         amdgpu_vm_pt_free_root(adev, vm);
2632         amdgpu_bo_unreserve(root);
2633         amdgpu_bo_unref(&root);
2634         WARN_ON(vm->root.bo);
2635
2636         amdgpu_vm_fini_entities(vm);
2637
2638         if (!RB_EMPTY_ROOT(&vm->va.rb_root))
2639                 dev_err(adev->dev, "still active bo inside vm\n");
2640
2641         rbtree_postorder_for_each_entry_safe(mapping, tmp,
2642                                              &vm->va.rb_root, rb) {
2643                 /* Don't remove the mapping here, we don't want to trigger a
2644                  * rebalance and the tree is about to be destroyed anyway.
2645                  */
2646                 list_del(&mapping->list);
2647                 kfree(mapping);
2648         }
2649
2650         dma_fence_put(vm->last_update);
2651
2652         for (i = 0; i < AMDGPU_MAX_VMHUBS; i++) {
2653                 if (vm->reserved_vmid[i]) {
2654                         amdgpu_vmid_free_reserved(adev, i);
2655                         vm->reserved_vmid[i] = false;
2656                 }
2657         }
2658
2659         ttm_lru_bulk_move_fini(&adev->mman.bdev, &vm->lru_bulk_move);
2660 }
2661
2662 /**
2663  * amdgpu_vm_manager_init - init the VM manager
2664  *
2665  * @adev: amdgpu_device pointer
2666  *
2667  * Initialize the VM manager structures
2668  */
2669 void amdgpu_vm_manager_init(struct amdgpu_device *adev)
2670 {
2671         unsigned i;
2672
2673         /* Concurrent flushes are only possible starting with Vega10 and
2674          * are broken on Navi10 and Navi14.
2675          */
2676         adev->vm_manager.concurrent_flush = !(adev->asic_type < CHIP_VEGA10 ||
2677                                               adev->asic_type == CHIP_NAVI10 ||
2678                                               adev->asic_type == CHIP_NAVI14);
2679         amdgpu_vmid_mgr_init(adev);
2680
2681         adev->vm_manager.fence_context =
2682                 dma_fence_context_alloc(AMDGPU_MAX_RINGS);
2683         for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
2684                 adev->vm_manager.seqno[i] = 0;
2685
2686         spin_lock_init(&adev->vm_manager.prt_lock);
2687         atomic_set(&adev->vm_manager.num_prt_users, 0);
2688
2689         /* Unless overridden by the user, compute VM page tables are by
2690          * default updated by the CPU only on large-BAR systems.
2691          */
2692 #ifdef CONFIG_X86_64
2693         if (amdgpu_vm_update_mode == -1) {
2694                 /* For ASICs with VF MMIO access protection,
2695                  * avoid using the CPU for VM table updates.
2696                  */
2697                 if (amdgpu_gmc_vram_full_visible(&adev->gmc) &&
2698                     !amdgpu_sriov_vf_mmio_access_protection(adev))
2699                         adev->vm_manager.vm_update_mode =
2700                                 AMDGPU_VM_USE_CPU_FOR_COMPUTE;
2701                 else
2702                         adev->vm_manager.vm_update_mode = 0;
2703         } else
2704                 adev->vm_manager.vm_update_mode = amdgpu_vm_update_mode;
2705 #else
2706         adev->vm_manager.vm_update_mode = 0;
2707 #endif
2708
2709         xa_init_flags(&adev->vm_manager.pasids, XA_FLAGS_LOCK_IRQ);
2710 }
2711
2712 /**
2713  * amdgpu_vm_manager_fini - cleanup VM manager
2714  *
2715  * @adev: amdgpu_device pointer
2716  *
2717  * Cleanup the VM manager and free resources.
2718  */
2719 void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
2720 {
2721         WARN_ON(!xa_empty(&adev->vm_manager.pasids));
2722         xa_destroy(&adev->vm_manager.pasids);
2723
2724         amdgpu_vmid_mgr_fini(adev);
2725 }
2726
2727 /**
2728  * amdgpu_vm_ioctl - Manages VMID reservation for vm hubs.
2729  *
2730  * @dev: drm device pointer
2731  * @data: drm_amdgpu_vm
2732  * @filp: drm file pointer
2733  *
2734  * Returns:
2735  * 0 for success, -errno for errors.
2736  */
2737 int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
2738 {
2739         union drm_amdgpu_vm *args = data;
2740         struct amdgpu_device *adev = drm_to_adev(dev);
2741         struct amdgpu_fpriv *fpriv = filp->driver_priv;
2742
2743         /* No valid flags defined yet */
2744         if (args->in.flags)
2745                 return -EINVAL;
2746
2747         switch (args->in.op) {
2748         case AMDGPU_VM_OP_RESERVE_VMID:
2749                 /* We only need to reserve a VMID from the gfxhub */
2750                 if (!fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)]) {
2751                         amdgpu_vmid_alloc_reserved(adev, AMDGPU_GFXHUB(0));
2752                         fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)] = true;
2753                 }
2754
2755                 break;
2756         case AMDGPU_VM_OP_UNRESERVE_VMID:
2757                 if (fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)]) {
2758                         amdgpu_vmid_free_reserved(adev, AMDGPU_GFXHUB(0));
2759                         fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)] = false;
2760                 }
2761                 break;
2762         default:
2763                 return -EINVAL;
2764         }
2765
2766         return 0;
2767 }
2768
2769 /**
2770  * amdgpu_vm_handle_fault - graceful handling of VM faults.
2771  * @adev: amdgpu device pointer
2772  * @pasid: PASID of the VM
2773  * @vmid: VMID, only used for GFX 9.4.3.
2774  * @node_id: Node_id received in IH cookie. Only applicable for
2775  *           GFX 9.4.3.
2776  * @addr: Address of the fault
      * @ts: Timestamp of the fault
2777  * @write_fault: true for a write fault, false for a read fault
2778  *
2779  * Try to gracefully handle a VM fault. Return true if the fault was handled and
2780  * shouldn't be reported any more.
2781  */
2782 bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
2783                             u32 vmid, u32 node_id, uint64_t addr, uint64_t ts,
2784                             bool write_fault)
2785 {
2786         bool is_compute_context = false;
2787         struct amdgpu_bo *root;
2788         unsigned long irqflags;
2789         uint64_t value, flags;
2790         struct amdgpu_vm *vm;
2791         int r;
2792
2793         xa_lock_irqsave(&adev->vm_manager.pasids, irqflags);
2794         vm = xa_load(&adev->vm_manager.pasids, pasid);
2795         if (vm) {
2796                 root = amdgpu_bo_ref(vm->root.bo);
2797                 is_compute_context = vm->is_compute_context;
2798         } else {
2799                 root = NULL;
2800         }
2801         xa_unlock_irqrestore(&adev->vm_manager.pasids, irqflags);
2802
2803         if (!root)
2804                 return false;
2805
2806         addr /= AMDGPU_GPU_PAGE_SIZE;
2807
2808         if (is_compute_context && !svm_range_restore_pages(adev, pasid, vmid,
2809             node_id, addr, ts, write_fault)) {
2810                 amdgpu_bo_unref(&root);
2811                 return true;
2812         }
2813
2814         r = amdgpu_bo_reserve(root, true);
2815         if (r)
2816                 goto error_unref;
2817
2818         /* Double check that the VM still exists */
2819         xa_lock_irqsave(&adev->vm_manager.pasids, irqflags);
2820         vm = xa_load(&adev->vm_manager.pasids, pasid);
2821         if (vm && vm->root.bo != root)
2822                 vm = NULL;
2823         xa_unlock_irqrestore(&adev->vm_manager.pasids, irqflags);
2824         if (!vm)
2825                 goto error_unlock;
2826
2827         flags = AMDGPU_PTE_VALID | AMDGPU_PTE_SNOOPED |
2828                 AMDGPU_PTE_SYSTEM;
2829
2830         if (is_compute_context) {
2831                 /* Intentionally setting invalid PTE flag
2832                  * combination to force a no-retry-fault
2833                  */
2834                 flags = AMDGPU_VM_NORETRY_FLAGS;
2835                 value = 0;
2836         } else if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_NEVER) {
2837                 /* Redirect the access to the dummy page */
2838                 value = adev->dummy_page_addr;
2839                 flags |= AMDGPU_PTE_EXECUTABLE | AMDGPU_PTE_READABLE |
2840                         AMDGPU_PTE_WRITEABLE;
2841
2842         } else {
2843                 /* Let the hw retry silently on the PTE */
2844                 value = 0;
2845         }
2846
2847         r = dma_resv_reserve_fences(root->tbo.base.resv, 1);
2848         if (r) {
2849                 pr_debug("failed %d to reserve fence slot\n", r);
2850                 goto error_unlock;
2851         }
2852
2853         r = amdgpu_vm_update_range(adev, vm, true, false, false, false,
2854                                    NULL, addr, addr, flags, value, 0, NULL, NULL, NULL);
2855         if (r)
2856                 goto error_unlock;
2857
2858         r = amdgpu_vm_update_pdes(adev, vm, true);
2859
2860 error_unlock:
2861         amdgpu_bo_unreserve(root);
2862         if (r < 0)
2863                 DRM_ERROR("Can't handle page fault (%d)\n", r);
2864
2865 error_unref:
2866         amdgpu_bo_unref(&root);
2867
2868         return false;
2869 }
2870
2871 #if defined(CONFIG_DEBUG_FS)
2872 /**
2873  * amdgpu_debugfs_vm_bo_info  - print BO info for the VM
2874  *
2875  * @vm: Requested VM for printing BO info
2876  * @m: debugfs file
2877  *
2878  * Print BO information in debugfs file for the VM
2879  */
2880 void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m)
2881 {
2882         struct amdgpu_bo_va *bo_va, *tmp;
2883         u64 total_idle = 0;
2884         u64 total_evicted = 0;
2885         u64 total_relocated = 0;
2886         u64 total_moved = 0;
2887         u64 total_invalidated = 0;
2888         u64 total_done = 0;
2889         unsigned int total_idle_objs = 0;
2890         unsigned int total_evicted_objs = 0;
2891         unsigned int total_relocated_objs = 0;
2892         unsigned int total_moved_objs = 0;
2893         unsigned int total_invalidated_objs = 0;
2894         unsigned int total_done_objs = 0;
2895         unsigned int id = 0;
2896
2897         spin_lock(&vm->status_lock);
2898         seq_puts(m, "\tIdle BOs:\n");
2899         list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status) {
2900                 if (!bo_va->base.bo)
2901                         continue;
2902                 total_idle += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
2903         }
2904         total_idle_objs = id;
2905         id = 0;
2906
2907         seq_puts(m, "\tEvicted BOs:\n");
2908         list_for_each_entry_safe(bo_va, tmp, &vm->evicted, base.vm_status) {
2909                 if (!bo_va->base.bo)
2910                         continue;
2911                 total_evicted += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
2912         }
2913         total_evicted_objs = id;
2914         id = 0;
2915
2916         seq_puts(m, "\tRelocated BOs:\n");
2917         list_for_each_entry_safe(bo_va, tmp, &vm->relocated, base.vm_status) {
2918                 if (!bo_va->base.bo)
2919                         continue;
2920                 total_relocated += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
2921         }
2922         total_relocated_objs = id;
2923         id = 0;
2924
2925         seq_puts(m, "\tMoved BOs:\n");
2926         list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) {
2927                 if (!bo_va->base.bo)
2928                         continue;
2929                 total_moved += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
2930         }
2931         total_moved_objs = id;
2932         id = 0;
2933
2934         seq_puts(m, "\tInvalidated BOs:\n");
2935         list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status) {
2936                 if (!bo_va->base.bo)
2937                         continue;
2938                 total_invalidated += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
2939         }
2940         total_invalidated_objs = id;
2941         id = 0;
2942
2943         seq_puts(m, "\tDone BOs:\n");
2944         list_for_each_entry_safe(bo_va, tmp, &vm->done, base.vm_status) {
2945                 if (!bo_va->base.bo)
2946                         continue;
2947                 total_done += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
2948         }
2949         spin_unlock(&vm->status_lock);
2950         total_done_objs = id;
2951
2952         seq_printf(m, "\tTotal idle size:        %12lld\tobjs:\t%d\n", total_idle,
2953                    total_idle_objs);
2954         seq_printf(m, "\tTotal evicted size:     %12lld\tobjs:\t%d\n", total_evicted,
2955                    total_evicted_objs);
2956         seq_printf(m, "\tTotal relocated size:   %12lld\tobjs:\t%d\n", total_relocated,
2957                    total_relocated_objs);
2958         seq_printf(m, "\tTotal moved size:       %12lld\tobjs:\t%d\n", total_moved,
2959                    total_moved_objs);
2960         seq_printf(m, "\tTotal invalidated size: %12lld\tobjs:\t%d\n", total_invalidated,
2961                    total_invalidated_objs);
2962         seq_printf(m, "\tTotal done size:        %12lld\tobjs:\t%d\n", total_done,
2963                    total_done_objs);
2964 }
2965 #endif
2966
2967 /**
2968  * amdgpu_vm_update_fault_cache - update cached fault info.
2969  * @adev: amdgpu device pointer
2970  * @pasid: PASID of the VM
2971  * @addr: Address of the fault
2972  * @status: GPUVM fault status register
2973  * @vmhub: which vmhub got the fault
2974  *
2975  * Cache the fault info for later use by userspace in debugging.
2976  */
2977 void amdgpu_vm_update_fault_cache(struct amdgpu_device *adev,
2978                                   unsigned int pasid,
2979                                   uint64_t addr,
2980                                   uint32_t status,
2981                                   unsigned int vmhub)
2982 {
2983         struct amdgpu_vm *vm;
2984         unsigned long flags;
2985
2986         xa_lock_irqsave(&adev->vm_manager.pasids, flags);
2987
2988         vm = xa_load(&adev->vm_manager.pasids, pasid);
2989         /* Don't update the fault cache if status is 0.  In the multiple
2990          * fault case, subsequent faults will return a 0 status which is
2991          * useless for userspace and replaces the useful fault status, so
2992          * only update if status is non-0.
2993          */
2994         if (vm && status) {
2995                 vm->fault_info.addr = addr;
2996                 vm->fault_info.status = status;
2997                 /*
2998                  * Update the fault information globally for later usage
2999                  * when vm could be stale or freed.
3000                  */
3001                 adev->vm_manager.fault_info.addr = addr;
3002                 adev->vm_manager.fault_info.vmhub = vmhub;
3003                 adev->vm_manager.fault_info.status = status;
3004
3005                 if (AMDGPU_IS_GFXHUB(vmhub)) {
3006                         vm->fault_info.vmhub = AMDGPU_VMHUB_TYPE_GFX;
3007                         vm->fault_info.vmhub |=
3008                                 (vmhub - AMDGPU_GFXHUB_START) << AMDGPU_VMHUB_IDX_SHIFT;
3009                 } else if (AMDGPU_IS_MMHUB0(vmhub)) {
3010                         vm->fault_info.vmhub = AMDGPU_VMHUB_TYPE_MM0;
3011                         vm->fault_info.vmhub |=
3012                                 (vmhub - AMDGPU_MMHUB0_START) << AMDGPU_VMHUB_IDX_SHIFT;
3013                 } else if (AMDGPU_IS_MMHUB1(vmhub)) {
3014                         vm->fault_info.vmhub = AMDGPU_VMHUB_TYPE_MM1;
3015                         vm->fault_info.vmhub |=
3016                                 (vmhub - AMDGPU_MMHUB1_START) << AMDGPU_VMHUB_IDX_SHIFT;
3017                 } else {
3018                         WARN_ONCE(1, "Invalid vmhub %u\n", vmhub);
3019                 }
3020         }
3021         xa_unlock_irqrestore(&adev->vm_manager.pasids, flags);
3022 }
3023
3024 /**
3025  * amdgpu_vm_is_bo_always_valid - check if the BO is VM always valid
3026  *
3027  * @vm: VM to test against.
3028  * @bo: BO to be tested.
3029  *
3030  * Returns true if the BO shares the dma_resv object with the root PD and is
3031  * always guaranteed to be valid inside the VM.
3032  */
3033 bool amdgpu_vm_is_bo_always_valid(struct amdgpu_vm *vm, struct amdgpu_bo *bo)
3034 {
3035         return bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv;
3036 }