Git Repo - linux.git/blob - drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
drm/amdgpu: Fix SDMA TO after GPU reset v3
1 /*
2  * Copyright 2008 Advanced Micro Devices, Inc.
3  * Copyright 2008 Red Hat Inc.
4  * Copyright 2009 Jerome Glisse.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors: Dave Airlie
25  *          Alex Deucher
26  *          Jerome Glisse
27  */
28 #include <linux/dma-fence-array.h>
29 #include <linux/interval_tree_generic.h>
30 #include <linux/idr.h>
31 #include <drm/drmP.h>
32 #include <drm/amdgpu_drm.h>
33 #include "amdgpu.h"
34 #include "amdgpu_trace.h"
35 #include "amdgpu_amdkfd.h"
36 #include "amdgpu_gmc.h"
37
38 /**
39  * DOC: GPUVM
40  *
41  * GPUVM is similar to the legacy gart on older asics, however
42  * rather than there being a single global gart table
43  * for the entire GPU, there are multiple VM page tables active
44  * at any given time.  The VM page tables can contain a mix of
45  * vram pages and system memory pages, and system memory pages
46  * can be mapped as snooped (cached system pages) or unsnooped
47  * (uncached system pages).
48  * Each VM has an ID associated with it and there is a page table
49  * associated with each VMID.  When executing a command buffer,
50  * the kernel tells the ring what VMID to use for that command
51  * buffer.  VMIDs are allocated dynamically as commands are submitted.
52  * The userspace drivers maintain their own address space and the kernel
53  * sets up their page tables accordingly when they submit their
54  * command buffers and a VMID is assigned.
55  * Cayman/Trinity support up to 8 active VMs at any given time;
56  * SI supports 16.
57  */
58
59 #define START(node) ((node)->start)
60 #define LAST(node) ((node)->last)
61
62 INTERVAL_TREE_DEFINE(struct amdgpu_bo_va_mapping, rb, uint64_t, __subtree_last,
63                      START, LAST, static, amdgpu_vm_it)
64
65 #undef START
66 #undef LAST
67
68 /**
69  * struct amdgpu_pte_update_params - Local structure
70  *
71  * Encapsulate some VM table update parameters to reduce
72  * the number of function parameters
73  *
74  */
75 struct amdgpu_pte_update_params {
76
77         /**
78          * @adev: amdgpu device we do this update for
79          */
80         struct amdgpu_device *adev;
81
82         /**
83          * @vm: optional amdgpu_vm we do this update for
84          */
85         struct amdgpu_vm *vm;
86
87         /**
88          * @src: address where to copy page table entries from
89          */
90         uint64_t src;
91
92         /**
93          * @ib: indirect buffer to fill with commands
94          */
95         struct amdgpu_ib *ib;
96
97         /**
98          * @func: Function which actually does the update
99          */
100         void (*func)(struct amdgpu_pte_update_params *params,
101                      struct amdgpu_bo *bo, uint64_t pe,
102                      uint64_t addr, unsigned count, uint32_t incr,
103                      uint64_t flags);
104         /**
105          * @pages_addr:
106          *
107          * DMA addresses to use for mapping, used during VM update by CPU
108          */
109         dma_addr_t *pages_addr;
110
111         /**
112          * @kptr:
113          *
114          * Kernel pointer of PD/PT BO that needs to be updated,
115          * used during VM update by CPU
116          */
117         void *kptr;
118 };
119
120 /**
121  * struct amdgpu_prt_cb - Helper to disable partial resident texture feature from a fence callback
122  */
123 struct amdgpu_prt_cb {
124
125         /**
126          * @adev: amdgpu device
127          */
128         struct amdgpu_device *adev;
129
130         /**
131          * @cb: callback
132          */
133         struct dma_fence_cb cb;
134 };
135
136 /**
137  * amdgpu_vm_level_shift - return the addr shift for each level
138  *
139  * @adev: amdgpu_device pointer
140  * @level: VMPT level
141  *
142  * Returns:
143  * The number of bits the pfn needs to be right shifted for a level.
144  */
145 static unsigned amdgpu_vm_level_shift(struct amdgpu_device *adev,
146                                       unsigned level)
147 {
148         unsigned shift = 0xff;
149
150         switch (level) {
151         case AMDGPU_VM_PDB2:
152         case AMDGPU_VM_PDB1:
153         case AMDGPU_VM_PDB0:
154                 shift = 9 * (AMDGPU_VM_PDB0 - level) +
155                         adev->vm_manager.block_size;
156                 break;
157         case AMDGPU_VM_PTB:
158                 shift = 0;
159                 break;
160         default:
161                 dev_err(adev->dev, "the level%d isn't supported.\n", level);
162         }
163
164         return shift;
165 }
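/*
 * Illustrative example (not part of the original file; assumes the common
 * 9-bit block_size): the shift returned above is applied to the GPU page
 * frame number, so each level covers
 *
 *   AMDGPU_VM_PTB  -> shift 0   (one 4 KiB GPU page per PTE)
 *   AMDGPU_VM_PDB0 -> shift 9   (2^9  pages = 2 MiB per PDE0)
 *   AMDGPU_VM_PDB1 -> shift 18  (2^18 pages = 1 GiB per PDE1)
 *   AMDGPU_VM_PDB2 -> shift 27  (2^27 pages = 512 GiB per PDE2)
 */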
166
167 /**
168  * amdgpu_vm_num_entries - return the number of entries in a PD/PT
169  *
170  * @adev: amdgpu_device pointer
171  * @level: VMPT level
172  *
173  * Returns:
174  * The number of entries in a page directory or page table.
175  */
176 static unsigned amdgpu_vm_num_entries(struct amdgpu_device *adev,
177                                       unsigned level)
178 {
179         unsigned shift = amdgpu_vm_level_shift(adev,
180                                                adev->vm_manager.root_level);
181
182         if (level == adev->vm_manager.root_level)
183                 /* For the root directory */
184                 return round_up(adev->vm_manager.max_pfn, 1 << shift) >> shift;
185         else if (level != AMDGPU_VM_PTB)
186                 /* Everything in between */
187                 return 512;
188         else
189                 /* For the page tables on the leaves */
190                 return AMDGPU_VM_PTE_COUNT(adev);
191 }
192
193 /**
194  * amdgpu_vm_bo_size - returns the size of the BOs in bytes
195  *
196  * @adev: amdgpu_device pointer
197  * @level: VMPT level
198  *
199  * Returns:
200  * The size of the BO for a page directory or page table in bytes.
201  */
202 static unsigned amdgpu_vm_bo_size(struct amdgpu_device *adev, unsigned level)
203 {
204         return AMDGPU_GPU_PAGE_ALIGN(amdgpu_vm_num_entries(adev, level) * 8);
205 }
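/*
 * Sizing example (illustrative; assumes the default 9-bit block_size): an
 * intermediate directory has 512 entries and a leaf page table has
 * AMDGPU_VM_PTE_COUNT() = 1 << 9 = 512 entries, so amdgpu_vm_bo_size()
 * returns 512 * 8 = 4096 bytes for both, i.e. exactly one GPU page. Only
 * the root directory is sized from max_pfn rounded up to its level shift.
 */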
206
207 /**
208  * amdgpu_vm_bo_evicted - vm_bo is evicted
209  *
210  * @vm_bo: vm_bo which is evicted
211  *
212  * State for PDs/PTs and per VM BOs which are not at the location they should
213  * be.
214  */
215 static void amdgpu_vm_bo_evicted(struct amdgpu_vm_bo_base *vm_bo)
216 {
217         struct amdgpu_vm *vm = vm_bo->vm;
218         struct amdgpu_bo *bo = vm_bo->bo;
219
220         vm_bo->moved = true;
221         if (bo->tbo.type == ttm_bo_type_kernel)
222                 list_move(&vm_bo->vm_status, &vm->evicted);
223         else
224                 list_move_tail(&vm_bo->vm_status, &vm->evicted);
225 }
226
227 /**
228  * amdgpu_vm_bo_relocated - vm_bo is relocated
229  *
230  * @vm_bo: vm_bo which is relocated
231  *
232  * State for PDs/PTs which need to update their parent PD.
233  */
234 static void amdgpu_vm_bo_relocated(struct amdgpu_vm_bo_base *vm_bo)
235 {
236         list_move(&vm_bo->vm_status, &vm_bo->vm->relocated);
237 }
238
239 /**
240  * amdgpu_vm_bo_moved - vm_bo is moved
241  *
242  * @vm_bo: vm_bo which is moved
243  *
244  * State for per VM BOs which are moved, but that change is not yet reflected
245  * in the page tables.
246  */
247 static void amdgpu_vm_bo_moved(struct amdgpu_vm_bo_base *vm_bo)
248 {
249         list_move(&vm_bo->vm_status, &vm_bo->vm->moved);
250 }
251
252 /**
253  * amdgpu_vm_bo_idle - vm_bo is idle
254  *
255  * @vm_bo: vm_bo which is now idle
256  *
257  * State for PDs/PTs and per VM BOs which have gone through the state machine
258  * and are now idle.
259  */
260 static void amdgpu_vm_bo_idle(struct amdgpu_vm_bo_base *vm_bo)
261 {
262         list_move(&vm_bo->vm_status, &vm_bo->vm->idle);
263         vm_bo->moved = false;
264 }
265
266 /**
267  * amdgpu_vm_bo_invalidated - vm_bo is invalidated
268  *
269  * @vm_bo: vm_bo which is now invalidated
270  *
271  * State for normal BOs which are invalidated and that change is not yet reflected
272  * in the PTs.
273  */
274 static void amdgpu_vm_bo_invalidated(struct amdgpu_vm_bo_base *vm_bo)
275 {
276         spin_lock(&vm_bo->vm->invalidated_lock);
277         list_move(&vm_bo->vm_status, &vm_bo->vm->invalidated);
278         spin_unlock(&vm_bo->vm->invalidated_lock);
279 }
280
281 /**
282  * amdgpu_vm_bo_done - vm_bo is done
283  *
284  * @vm_bo: vm_bo which is now done
285  *
286  * State for normal BOs which are invalidated and that change has been updated
287  * in the PTs.
288  */
289 static void amdgpu_vm_bo_done(struct amdgpu_vm_bo_base *vm_bo)
290 {
291         spin_lock(&vm_bo->vm->invalidated_lock);
292         list_del_init(&vm_bo->vm_status);
293         spin_unlock(&vm_bo->vm->invalidated_lock);
294 }
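/*
 * Condensed overview of the per-BO state lists driven by the helpers above
 * (illustrative summary of the individual kernel-doc comments):
 *
 *   evicted     - PDs/PTs and per-VM BOs not at the location they should be
 *   relocated   - PDs/PTs whose entry in the parent PD must be updated
 *   moved       - per-VM BOs moved, page tables not yet updated
 *   idle        - BOs that went through the state machine and are up to date
 *   invalidated - normal BOs invalidated, page tables not yet updated
 *   done        - normal BOs whose change has been written to the page tables
 *                 (removed from the invalidated list)
 */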
295
296 /**
297  * amdgpu_vm_bo_base_init - Adds bo to the list of bos associated with the vm
298  *
299  * @base: base structure for tracking BO usage in a VM
300  * @vm: vm to which bo is to be added
301  * @bo: amdgpu buffer object
302  *
303  * Initialize a bo_va_base structure and add it to the appropriate lists
304  *
305  */
306 static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
307                                    struct amdgpu_vm *vm,
308                                    struct amdgpu_bo *bo)
309 {
310         base->vm = vm;
311         base->bo = bo;
312         INIT_LIST_HEAD(&base->bo_list);
313         INIT_LIST_HEAD(&base->vm_status);
314
315         if (!bo)
316                 return;
317         list_add_tail(&base->bo_list, &bo->va);
318
319         if (bo->tbo.resv != vm->root.base.bo->tbo.resv)
320                 return;
321
322         vm->bulk_moveable = false;
323         if (bo->tbo.type == ttm_bo_type_kernel)
324                 amdgpu_vm_bo_relocated(base);
325         else
326                 amdgpu_vm_bo_idle(base);
327
328         if (bo->preferred_domains &
329             amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type))
330                 return;
331
332         /*
333          * We checked all the prerequisites, but it looks like this per vm bo
334          * is currently evicted. Add the bo to the evicted list to make sure it
335          * is validated on next vm use to avoid fault.
336          */
337         amdgpu_vm_bo_evicted(base);
338 }
339
340 /**
341  * amdgpu_vm_pt_parent - get the parent page directory
342  *
343  * @pt: child page table
344  *
345  * Helper to get the parent entry for the child page table. NULL if we are at
346  * the root page directory.
347  */
348 static struct amdgpu_vm_pt *amdgpu_vm_pt_parent(struct amdgpu_vm_pt *pt)
349 {
350         struct amdgpu_bo *parent = pt->base.bo->parent;
351
352         if (!parent)
353                 return NULL;
354
355         return list_first_entry(&parent->va, struct amdgpu_vm_pt, base.bo_list);
356 }
357
358 /**
359  * amdgpu_vm_get_pd_bo - add the VM PD to a validation list
360  *
361  * @vm: vm providing the BOs
362  * @validated: head of validation list
363  * @entry: entry to add
364  *
365  * Add the page directory to the list of BOs to
366  * validate for command submission.
367  */
368 void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
369                          struct list_head *validated,
370                          struct amdgpu_bo_list_entry *entry)
371 {
372         entry->robj = vm->root.base.bo;
373         entry->priority = 0;
374         entry->tv.bo = &entry->robj->tbo;
375         entry->tv.shared = true;
376         entry->user_pages = NULL;
377         list_add(&entry->tv.head, validated);
378 }
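/*
 * Minimal usage sketch (illustrative, not part of this file): callers
 * typically add the PD to a local validation list before reserving all BOs
 * for a submission, roughly:
 *
 *	struct ww_acquire_ctx ticket;
 *	struct list_head list;
 *	struct amdgpu_bo_list_entry vm_pd;
 *
 *	INIT_LIST_HEAD(&list);
 *	amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);
 *	ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
 */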
379
380 /**
381  * amdgpu_vm_move_to_lru_tail - move all BOs to the end of LRU
382  *
383  * @adev: amdgpu device pointer
384  * @vm: vm providing the BOs
385  *
386  * Move all BOs to the end of LRU and remember their positions to put them
387  * together.
388  */
389 void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
390                                 struct amdgpu_vm *vm)
391 {
392         struct ttm_bo_global *glob = adev->mman.bdev.glob;
393         struct amdgpu_vm_bo_base *bo_base;
394
395         if (vm->bulk_moveable) {
396                 spin_lock(&glob->lru_lock);
397                 ttm_bo_bulk_move_lru_tail(&vm->lru_bulk_move);
398                 spin_unlock(&glob->lru_lock);
399                 return;
400         }
401
402         memset(&vm->lru_bulk_move, 0, sizeof(vm->lru_bulk_move));
403
404         spin_lock(&glob->lru_lock);
405         list_for_each_entry(bo_base, &vm->idle, vm_status) {
406                 struct amdgpu_bo *bo = bo_base->bo;
407
408                 if (!bo->parent)
409                         continue;
410
411                 ttm_bo_move_to_lru_tail(&bo->tbo, &vm->lru_bulk_move);
412                 if (bo->shadow)
413                         ttm_bo_move_to_lru_tail(&bo->shadow->tbo,
414                                                 &vm->lru_bulk_move);
415         }
416         spin_unlock(&glob->lru_lock);
417
418         vm->bulk_moveable = true;
419 }
420
421 /**
422  * amdgpu_vm_validate_pt_bos - validate the page table BOs
423  *
424  * @adev: amdgpu device pointer
425  * @vm: vm providing the BOs
426  * @validate: callback to do the validation
427  * @param: parameter for the validation callback
428  *
429  * Validate the page table BOs on command submission if necessary.
430  *
431  * Returns:
432  * Validation result.
433  */
434 int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
435                               int (*validate)(void *p, struct amdgpu_bo *bo),
436                               void *param)
437 {
438         struct amdgpu_vm_bo_base *bo_base, *tmp;
439         int r = 0;
440
441         vm->bulk_moveable &= list_empty(&vm->evicted);
442
443         list_for_each_entry_safe(bo_base, tmp, &vm->evicted, vm_status) {
444                 struct amdgpu_bo *bo = bo_base->bo;
445
446                 r = validate(param, bo);
447                 if (r)
448                         break;
449
450                 if (bo->tbo.type != ttm_bo_type_kernel) {
451                         amdgpu_vm_bo_moved(bo_base);
452                 } else {
453                         if (vm->use_cpu_for_update)
454                                 r = amdgpu_bo_kmap(bo, NULL);
455                         else
456                                 r = amdgpu_ttm_alloc_gart(&bo->tbo);
457                         if (r)
458                                 break;
459                         if (bo->shadow) {
460                                 r = amdgpu_ttm_alloc_gart(&bo->shadow->tbo);
461                                 if (r)
462                                         break;
463                         }
464                         amdgpu_vm_bo_relocated(bo_base);
465                 }
466         }
467
468         return r;
469 }
470
471 /**
472  * amdgpu_vm_ready - check VM is ready for updates
473  *
474  * @vm: VM to check
475  *
476  * Check if all VM PDs/PTs are ready for updates
477  *
478  * Returns:
479  * True if eviction list is empty.
480  */
481 bool amdgpu_vm_ready(struct amdgpu_vm *vm)
482 {
483         return list_empty(&vm->evicted);
484 }
485
486 /**
487  * amdgpu_vm_clear_bo - initially clear the PDs/PTs
488  *
489  * @adev: amdgpu_device pointer
490  * @vm: VM to clear BO from
491  * @bo: BO to clear
492  * @level: level this BO is at
493  * @pte_support_ats: indicate ATS support from PTE
494  *
495  * Root PD needs to be reserved when calling this.
496  *
497  * Returns:
498  * 0 on success, errno otherwise.
499  */
500 static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
501                               struct amdgpu_vm *vm, struct amdgpu_bo *bo,
502                               unsigned level, bool pte_support_ats)
503 {
504         struct ttm_operation_ctx ctx = { true, false };
505         struct dma_fence *fence = NULL;
506         unsigned entries, ats_entries;
507         struct amdgpu_ring *ring;
508         struct amdgpu_job *job;
509         uint64_t addr;
510         int r;
511
512         entries = amdgpu_bo_size(bo) / 8;
513
514         if (pte_support_ats) {
515                 if (level == adev->vm_manager.root_level) {
516                         ats_entries = amdgpu_vm_level_shift(adev, level);
517                         ats_entries += AMDGPU_GPU_PAGE_SHIFT;
518                         ats_entries = AMDGPU_GMC_HOLE_START >> ats_entries;
519                         ats_entries = min(ats_entries, entries);
520                         entries -= ats_entries;
521                 } else {
522                         ats_entries = entries;
523                         entries = 0;
524                 }
525         } else {
526                 ats_entries = 0;
527         }
528
529         ring = container_of(vm->entity.rq->sched, struct amdgpu_ring, sched);
530
531         r = reservation_object_reserve_shared(bo->tbo.resv);
532         if (r)
533                 return r;
534
535         r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
536         if (r)
537                 goto error;
538
539         r = amdgpu_ttm_alloc_gart(&bo->tbo);
540         if (r)
541                 return r;
542
543         r = amdgpu_job_alloc_with_ib(adev, 64, &job);
544         if (r)
545                 goto error;
546
547         addr = amdgpu_bo_gpu_offset(bo);
548         if (ats_entries) {
549                 uint64_t ats_value;
550
551                 ats_value = AMDGPU_PTE_DEFAULT_ATC;
552                 if (level != AMDGPU_VM_PTB)
553                         ats_value |= AMDGPU_PDE_PTE;
554
555                 amdgpu_vm_set_pte_pde(adev, &job->ibs[0], addr, 0,
556                                       ats_entries, 0, ats_value);
557                 addr += ats_entries * 8;
558         }
559
560         if (entries)
561                 amdgpu_vm_set_pte_pde(adev, &job->ibs[0], addr, 0,
562                                       entries, 0, 0);
563
564         amdgpu_ring_pad_ib(ring, &job->ibs[0]);
565
566         WARN_ON(job->ibs[0].length_dw > 64);
567         r = amdgpu_sync_resv(adev, &job->sync, bo->tbo.resv,
568                              AMDGPU_FENCE_OWNER_UNDEFINED, false);
569         if (r)
570                 goto error_free;
571
572         r = amdgpu_job_submit(job, &vm->entity, AMDGPU_FENCE_OWNER_UNDEFINED,
573                               &fence);
574         if (r)
575                 goto error_free;
576
577         amdgpu_bo_fence(bo, fence, true);
578         dma_fence_put(fence);
579
580         if (bo->shadow)
581                 return amdgpu_vm_clear_bo(adev, vm, bo->shadow,
582                                           level, pte_support_ats);
583
584         return 0;
585
586 error_free:
587         amdgpu_job_free(job);
588
589 error:
590         return r;
591 }
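/*
 * Worked example for the ATS split above (illustrative; assumes a four level
 * Vega style configuration with a 9-bit block_size, so the root level shift
 * is 27): ats_entries = AMDGPU_GMC_HOLE_START >> (27 + 12) =
 * 0x0000800000000000 >> 39 = 256. The first 256 root PDEs, covering the
 * lower 128 TiB half of the 48-bit address space, are initialized with
 * AMDGPU_PTE_DEFAULT_ATC (plus AMDGPU_PDE_PTE above the PTB level), while
 * the remaining entries are simply cleared to zero.
 */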
592
593 /**
594  * amdgpu_vm_bo_param - fill in parameters for PD/PT allocation
595  *
596  * @adev: amdgpu_device pointer
597  * @vm: requesting vm
598  * @bp: resulting BO allocation parameters
599  */
600 static void amdgpu_vm_bo_param(struct amdgpu_device *adev, struct amdgpu_vm *vm,
601                                int level, struct amdgpu_bo_param *bp)
602 {
603         memset(bp, 0, sizeof(*bp));
604
605         bp->size = amdgpu_vm_bo_size(adev, level);
606         bp->byte_align = AMDGPU_GPU_PAGE_SIZE;
607         bp->domain = AMDGPU_GEM_DOMAIN_VRAM;
608         if (bp->size <= PAGE_SIZE && adev->asic_type >= CHIP_VEGA10 &&
609             adev->flags & AMD_IS_APU)
610                 bp->domain |= AMDGPU_GEM_DOMAIN_GTT;
611         bp->domain = amdgpu_bo_get_preferred_pin_domain(adev, bp->domain);
612         bp->flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
613                 AMDGPU_GEM_CREATE_CPU_GTT_USWC;
614         if (vm->use_cpu_for_update)
615                 bp->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
616         else if (!vm->root.base.bo || vm->root.base.bo->shadow)
617                 bp->flags |= AMDGPU_GEM_CREATE_SHADOW;
618         bp->type = ttm_bo_type_kernel;
619         if (vm->root.base.bo)
620                 bp->resv = vm->root.base.bo->tbo.resv;
621 }
622
623 /**
624  * amdgpu_vm_alloc_levels - allocate the PD/PT levels
625  *
626  * @adev: amdgpu_device pointer
627  * @vm: requested vm
628  * @parent: parent PT
629  * @saddr: start of the address range
630  * @eaddr: end of the address range
631  * @level: VMPT level
632  * @ats: indicate ATS support from PTE
633  *
634  * Make sure the page directories and page tables are allocated
635  *
636  * Returns:
637  * 0 on success, errno otherwise.
638  */
639 static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
640                                   struct amdgpu_vm *vm,
641                                   struct amdgpu_vm_pt *parent,
642                                   uint64_t saddr, uint64_t eaddr,
643                                   unsigned level, bool ats)
644 {
645         unsigned shift = amdgpu_vm_level_shift(adev, level);
646         struct amdgpu_bo_param bp;
647         unsigned pt_idx, from, to;
648         int r;
649
650         if (!parent->entries) {
651                 unsigned num_entries = amdgpu_vm_num_entries(adev, level);
652
653                 parent->entries = kvmalloc_array(num_entries,
654                                                    sizeof(struct amdgpu_vm_pt),
655                                                    GFP_KERNEL | __GFP_ZERO);
656                 if (!parent->entries)
657                         return -ENOMEM;
658         }
659
660         from = saddr >> shift;
661         to = eaddr >> shift;
662         if (from >= amdgpu_vm_num_entries(adev, level) ||
663             to >= amdgpu_vm_num_entries(adev, level))
664                 return -EINVAL;
665
666         ++level;
667         saddr = saddr & ((1 << shift) - 1);
668         eaddr = eaddr & ((1 << shift) - 1);
669
670         amdgpu_vm_bo_param(adev, vm, level, &bp);
671
672         /* walk over the address space and allocate the page tables */
673         for (pt_idx = from; pt_idx <= to; ++pt_idx) {
674                 struct amdgpu_vm_pt *entry = &parent->entries[pt_idx];
675                 struct amdgpu_bo *pt;
676
677                 if (!entry->base.bo) {
678                         r = amdgpu_bo_create(adev, &bp, &pt);
679                         if (r)
680                                 return r;
681
682                         r = amdgpu_vm_clear_bo(adev, vm, pt, level, ats);
683                         if (r) {
684                                 amdgpu_bo_unref(&pt->shadow);
685                                 amdgpu_bo_unref(&pt);
686                                 return r;
687                         }
688
689                         if (vm->use_cpu_for_update) {
690                                 r = amdgpu_bo_kmap(pt, NULL);
691                                 if (r) {
692                                         amdgpu_bo_unref(&pt->shadow);
693                                         amdgpu_bo_unref(&pt);
694                                         return r;
695                                 }
696                         }
697
698                         /* Keep a reference to the root directory to avoid
699                         * freeing them up in the wrong order.
700                         */
701                         pt->parent = amdgpu_bo_ref(parent->base.bo);
702
703                         amdgpu_vm_bo_base_init(&entry->base, vm, pt);
704                 }
705
706                 if (level < AMDGPU_VM_PTB) {
707                         uint64_t sub_saddr = (pt_idx == from) ? saddr : 0;
708                         uint64_t sub_eaddr = (pt_idx == to) ? eaddr :
709                                 ((1 << shift) - 1);
710                         r = amdgpu_vm_alloc_levels(adev, vm, entry, sub_saddr,
711                                                    sub_eaddr, level, ats);
712                         if (r)
713                                 return r;
714                 }
715         }
716
717         return 0;
718 }
719
720 /**
721  * amdgpu_vm_alloc_pts - Allocate page tables.
722  *
723  * @adev: amdgpu_device pointer
724  * @vm: VM to allocate page tables for
725  * @saddr: Start address which needs to be allocated
726  * @size: Size from start address we need.
727  *
728  * Make sure the page tables are allocated.
729  *
730  * Returns:
731  * 0 on success, errno otherwise.
732  */
733 int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
734                         struct amdgpu_vm *vm,
735                         uint64_t saddr, uint64_t size)
736 {
737         uint64_t eaddr;
738         bool ats = false;
739
740         /* validate the parameters */
741         if (saddr & AMDGPU_GPU_PAGE_MASK || size & AMDGPU_GPU_PAGE_MASK)
742                 return -EINVAL;
743
744         eaddr = saddr + size - 1;
745
746         if (vm->pte_support_ats)
747                 ats = saddr < AMDGPU_GMC_HOLE_START;
748
749         saddr /= AMDGPU_GPU_PAGE_SIZE;
750         eaddr /= AMDGPU_GPU_PAGE_SIZE;
751
752         if (eaddr >= adev->vm_manager.max_pfn) {
753                 dev_err(adev->dev, "va above limit (0x%08llX >= 0x%08llX)\n",
754                         eaddr, adev->vm_manager.max_pfn);
755                 return -EINVAL;
756         }
757
758         return amdgpu_vm_alloc_levels(adev, vm, &vm->root, saddr, eaddr,
759                                       adev->vm_manager.root_level, ats);
760 }
761
762 /**
763  * amdgpu_vm_check_compute_bug - check whether asic has compute vm bug
764  *
765  * @adev: amdgpu_device pointer
766  */
767 void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev)
768 {
769         const struct amdgpu_ip_block *ip_block;
770         bool has_compute_vm_bug;
771         struct amdgpu_ring *ring;
772         int i;
773
774         has_compute_vm_bug = false;
775
776         ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
777         if (ip_block) {
778                 /* Compute has a VM bug for GFX version < 7.
779                    Compute has a VM bug for GFX 8 MEC firmware version < 673.*/
780                 if (ip_block->version->major <= 7)
781                         has_compute_vm_bug = true;
782                 else if (ip_block->version->major == 8)
783                         if (adev->gfx.mec_fw_version < 673)
784                                 has_compute_vm_bug = true;
785         }
786
787         for (i = 0; i < adev->num_rings; i++) {
788                 ring = adev->rings[i];
789                 if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
790                         /* only compute rings */
791                         ring->has_compute_vm_bug = has_compute_vm_bug;
792                 else
793                         ring->has_compute_vm_bug = false;
794         }
795 }
796
797 /**
798  * amdgpu_vm_need_pipeline_sync - Check if pipe sync is needed for job.
799  *
800  * @ring: ring on which the job will be submitted
801  * @job: job to submit
802  *
803  * Returns:
804  * True if sync is needed.
805  */
806 bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
807                                   struct amdgpu_job *job)
808 {
809         struct amdgpu_device *adev = ring->adev;
810         unsigned vmhub = ring->funcs->vmhub;
811         struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
812         struct amdgpu_vmid *id;
813         bool gds_switch_needed;
814         bool vm_flush_needed = job->vm_needs_flush || ring->has_compute_vm_bug;
815
816         if (job->vmid == 0)
817                 return false;
818         id = &id_mgr->ids[job->vmid];
819         gds_switch_needed = ring->funcs->emit_gds_switch && (
820                 id->gds_base != job->gds_base ||
821                 id->gds_size != job->gds_size ||
822                 id->gws_base != job->gws_base ||
823                 id->gws_size != job->gws_size ||
824                 id->oa_base != job->oa_base ||
825                 id->oa_size != job->oa_size);
826
827         if (amdgpu_vmid_had_gpu_reset(adev, id))
828                 return true;
829
830         return vm_flush_needed || gds_switch_needed;
831 }
832
833 /**
834  * amdgpu_vm_flush - hardware flush the vm
835  *
836  * @ring: ring to use for flush
837  * @job:  related job
838  * @need_pipe_sync: is pipe sync needed
839  *
840  * Emit a VM flush when it is necessary.
841  *
842  * Returns:
843  * 0 on success, errno otherwise.
844  */
845 int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync)
846 {
847         struct amdgpu_device *adev = ring->adev;
848         unsigned vmhub = ring->funcs->vmhub;
849         struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
850         struct amdgpu_vmid *id = &id_mgr->ids[job->vmid];
851         bool gds_switch_needed = ring->funcs->emit_gds_switch && (
852                 id->gds_base != job->gds_base ||
853                 id->gds_size != job->gds_size ||
854                 id->gws_base != job->gws_base ||
855                 id->gws_size != job->gws_size ||
856                 id->oa_base != job->oa_base ||
857                 id->oa_size != job->oa_size);
858         bool vm_flush_needed = job->vm_needs_flush;
859         bool pasid_mapping_needed = id->pasid != job->pasid ||
860                 !id->pasid_mapping ||
861                 !dma_fence_is_signaled(id->pasid_mapping);
862         struct dma_fence *fence = NULL;
863         unsigned patch_offset = 0;
864         int r;
865
866         if (amdgpu_vmid_had_gpu_reset(adev, id)) {
867                 gds_switch_needed = true;
868                 vm_flush_needed = true;
869                 pasid_mapping_needed = true;
870         }
871
872         gds_switch_needed &= !!ring->funcs->emit_gds_switch;
873         vm_flush_needed &= !!ring->funcs->emit_vm_flush  &&
874                         job->vm_pd_addr != AMDGPU_BO_INVALID_OFFSET;
875         pasid_mapping_needed &= adev->gmc.gmc_funcs->emit_pasid_mapping &&
876                 ring->funcs->emit_wreg;
877
878         if (!vm_flush_needed && !gds_switch_needed && !need_pipe_sync)
879                 return 0;
880
881         if (ring->funcs->init_cond_exec)
882                 patch_offset = amdgpu_ring_init_cond_exec(ring);
883
884         if (need_pipe_sync)
885                 amdgpu_ring_emit_pipeline_sync(ring);
886
887         if (vm_flush_needed) {
888                 trace_amdgpu_vm_flush(ring, job->vmid, job->vm_pd_addr);
889                 amdgpu_ring_emit_vm_flush(ring, job->vmid, job->vm_pd_addr);
890         }
891
892         if (pasid_mapping_needed)
893                 amdgpu_gmc_emit_pasid_mapping(ring, job->vmid, job->pasid);
894
895         if (vm_flush_needed || pasid_mapping_needed) {
896                 r = amdgpu_fence_emit(ring, &fence, 0);
897                 if (r)
898                         return r;
899         }
900
901         if (vm_flush_needed) {
902                 mutex_lock(&id_mgr->lock);
903                 dma_fence_put(id->last_flush);
904                 id->last_flush = dma_fence_get(fence);
905                 id->current_gpu_reset_count =
906                         atomic_read(&adev->gpu_reset_counter);
907                 mutex_unlock(&id_mgr->lock);
908         }
909
910         if (pasid_mapping_needed) {
911                 id->pasid = job->pasid;
912                 dma_fence_put(id->pasid_mapping);
913                 id->pasid_mapping = dma_fence_get(fence);
914         }
915         dma_fence_put(fence);
916
917         if (ring->funcs->emit_gds_switch && gds_switch_needed) {
918                 id->gds_base = job->gds_base;
919                 id->gds_size = job->gds_size;
920                 id->gws_base = job->gws_base;
921                 id->gws_size = job->gws_size;
922                 id->oa_base = job->oa_base;
923                 id->oa_size = job->oa_size;
924                 amdgpu_ring_emit_gds_switch(ring, job->vmid, job->gds_base,
925                                             job->gds_size, job->gws_base,
926                                             job->gws_size, job->oa_base,
927                                             job->oa_size);
928         }
929
930         if (ring->funcs->patch_cond_exec)
931                 amdgpu_ring_patch_cond_exec(ring, patch_offset);
932
933         /* the double SWITCH_BUFFER here *cannot* be skipped by COND_EXEC */
934         if (ring->funcs->emit_switch_buffer) {
935                 amdgpu_ring_emit_switch_buffer(ring);
936                 amdgpu_ring_emit_switch_buffer(ring);
937         }
938         return 0;
939 }
940
941 /**
942  * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
943  *
944  * @vm: requested vm
945  * @bo: requested buffer object
946  *
947  * Find @bo inside the requested vm.
948  * Search inside the @bo's vm list for the requested vm.
949  * Returns the found bo_va or NULL if none is found.
950  *
951  * Object has to be reserved!
952  *
953  * Returns:
954  * Found bo_va or NULL.
955  */
956 struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
957                                        struct amdgpu_bo *bo)
958 {
959         struct amdgpu_bo_va *bo_va;
960
961         list_for_each_entry(bo_va, &bo->va, base.bo_list) {
962                 if (bo_va->base.vm == vm) {
963                         return bo_va;
964                 }
965         }
966         return NULL;
967 }
968
969 /**
970  * amdgpu_vm_do_set_ptes - helper to call the right asic function
971  *
972  * @params: see amdgpu_pte_update_params definition
973  * @bo: PD/PT to update
974  * @pe: addr of the page entry
975  * @addr: dst addr to write into pe
976  * @count: number of page entries to update
977  * @incr: increase next addr by incr bytes
978  * @flags: hw access flags
979  *
980  * Traces the parameters and calls the right asic functions
981  * to setup the page table using the DMA.
982  */
983 static void amdgpu_vm_do_set_ptes(struct amdgpu_pte_update_params *params,
984                                   struct amdgpu_bo *bo,
985                                   uint64_t pe, uint64_t addr,
986                                   unsigned count, uint32_t incr,
987                                   uint64_t flags)
988 {
989         pe += amdgpu_bo_gpu_offset(bo);
990         trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags);
991
992         if (count < 3) {
993                 amdgpu_vm_write_pte(params->adev, params->ib, pe,
994                                     addr | flags, count, incr);
995
996         } else {
997                 amdgpu_vm_set_pte_pde(params->adev, params->ib, pe, addr,
998                                       count, incr, flags);
999         }
1000 }
1001
1002 /**
1003  * amdgpu_vm_do_copy_ptes - copy the PTEs from the GART
1004  *
1005  * @params: see amdgpu_pte_update_params definition
1006  * @bo: PD/PT to update
1007  * @pe: addr of the page entry
1008  * @addr: dst addr to write into pe
1009  * @count: number of page entries to update
1010  * @incr: increase next addr by incr bytes
1011  * @flags: hw access flags
1012  *
1013  * Traces the parameters and calls the DMA function to copy the PTEs.
1014  */
1015 static void amdgpu_vm_do_copy_ptes(struct amdgpu_pte_update_params *params,
1016                                    struct amdgpu_bo *bo,
1017                                    uint64_t pe, uint64_t addr,
1018                                    unsigned count, uint32_t incr,
1019                                    uint64_t flags)
1020 {
1021         uint64_t src = (params->src + (addr >> 12) * 8);
1022
1023         pe += amdgpu_bo_gpu_offset(bo);
1024         trace_amdgpu_vm_copy_ptes(pe, src, count);
1025
1026         amdgpu_vm_copy_pte(params->adev, params->ib, pe, src, count);
1027 }
1028
1029 /**
1030  * amdgpu_vm_map_gart - Resolve gart mapping of addr
1031  *
1032  * @pages_addr: optional DMA address to use for lookup
1033  * @addr: the unmapped addr
1034  *
1035  * Look up the physical address of the page that the pte resolves
1036  * to.
1037  *
1038  * Returns:
1039  * The pointer for the page table entry.
1040  */
1041 static uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
1042 {
1043         uint64_t result;
1044
1045         /* page table offset */
1046         result = pages_addr[addr >> PAGE_SHIFT];
1047
1048         /* in case cpu page size != gpu page size*/
1049         /* in case cpu page size != gpu page size */
1050
1051         result &= 0xFFFFFFFFFFFFF000ULL;
1052
1053         return result;
1054 }
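/*
 * Illustrative example: with 64 KiB CPU pages and 4 KiB GPU pages, resolving
 * addr = 0x15000 first looks up pages_addr[0x1] (the DMA address of the
 * 64 KiB CPU page), then ORs in the in-page offset 0x5000 and masks to a
 * 4 KiB boundary, yielding the DMA address of the 4 KiB GPU page at offset
 * 0x5000 inside that CPU page. With 4 KiB CPU pages the OR and the final
 * mask cancel out and the lookup alone gives the result.
 */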
1055
1056 /**
1057  * amdgpu_vm_cpu_set_ptes - helper to update page tables via CPU
1058  *
1059  * @params: see amdgpu_pte_update_params definition
1060  * @bo: PD/PT to update
1061  * @pe: kmap addr of the page entry
1062  * @addr: dst addr to write into pe
1063  * @count: number of page entries to update
1064  * @incr: increase next addr by incr bytes
1065  * @flags: hw access flags
1066  *
1067  * Write count number of PT/PD entries directly.
1068  */
1069 static void amdgpu_vm_cpu_set_ptes(struct amdgpu_pte_update_params *params,
1070                                    struct amdgpu_bo *bo,
1071                                    uint64_t pe, uint64_t addr,
1072                                    unsigned count, uint32_t incr,
1073                                    uint64_t flags)
1074 {
1075         unsigned int i;
1076         uint64_t value;
1077
1078         pe += (unsigned long)amdgpu_bo_kptr(bo);
1079
1080         trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags);
1081
1082         for (i = 0; i < count; i++) {
1083                 value = params->pages_addr ?
1084                         amdgpu_vm_map_gart(params->pages_addr, addr) :
1085                         addr;
1086                 amdgpu_gmc_set_pte_pde(params->adev, (void *)(uintptr_t)pe,
1087                                        i, value, flags);
1088                 addr += incr;
1089         }
1090 }
1091
1092
1093 /**
1094  * amdgpu_vm_wait_pd - Wait for PT BOs to be free.
1095  *
1096  * @adev: amdgpu_device pointer
1097  * @vm: related vm
1098  * @owner: fence owner
1099  *
1100  * Returns:
1101  * 0 on success, errno otherwise.
1102  */
1103 static int amdgpu_vm_wait_pd(struct amdgpu_device *adev, struct amdgpu_vm *vm,
1104                              void *owner)
1105 {
1106         struct amdgpu_sync sync;
1107         int r;
1108
1109         amdgpu_sync_create(&sync);
1110         amdgpu_sync_resv(adev, &sync, vm->root.base.bo->tbo.resv, owner, false);
1111         r = amdgpu_sync_wait(&sync, true);
1112         amdgpu_sync_free(&sync);
1113
1114         return r;
1115 }
1116
1117 /**
1118  * amdgpu_vm_update_func - helper to call update function
1119  *
1120  * Calls the update function for both the given BO and its shadow.
1121  */
1122 static void amdgpu_vm_update_func(struct amdgpu_pte_update_params *params,
1123                                   struct amdgpu_bo *bo,
1124                                   uint64_t pe, uint64_t addr,
1125                                   unsigned count, uint32_t incr,
1126                                   uint64_t flags)
1127 {
1128         if (bo->shadow)
1129                 params->func(params, bo->shadow, pe, addr, count, incr, flags);
1130         params->func(params, bo, pe, addr, count, incr, flags);
1131 }
1132
1133 /*
1134  * amdgpu_vm_update_pde - update a single level in the hierarchy
1135  *
1136  * @param: parameters for the update
1137  * @vm: requested vm
1138  * @parent: parent directory
1139  * @entry: entry to update
1140  *
1141  * Makes sure the requested entry in parent is up to date.
1142  */
1143 static void amdgpu_vm_update_pde(struct amdgpu_pte_update_params *params,
1144                                  struct amdgpu_vm *vm,
1145                                  struct amdgpu_vm_pt *parent,
1146                                  struct amdgpu_vm_pt *entry)
1147 {
1148         struct amdgpu_bo *bo = parent->base.bo, *pbo;
1149         uint64_t pde, pt, flags;
1150         unsigned level;
1151
1152         /* Don't update huge pages here */
1153         if (entry->huge)
1154                 return;
1155
1156         for (level = 0, pbo = bo->parent; pbo; ++level)
1157                 pbo = pbo->parent;
1158
1159         level += params->adev->vm_manager.root_level;
1160         amdgpu_gmc_get_pde_for_bo(entry->base.bo, level, &pt, &flags);
1161         pde = (entry - parent->entries) * 8;
1162         amdgpu_vm_update_func(params, bo, pde, pt, 1, 0, flags);
1163 }
1164
1165 /*
1166  * amdgpu_vm_invalidate_level - mark all PD levels as invalid
1167  *
1168  * @adev: amdgpu_device pointer
1169  * @vm: related vm
1170  * @parent: parent PD
1171  * @level: VMPT level
1172  *
1173  * Mark all PD levels as invalid after an error.
1174  */
1175 static void amdgpu_vm_invalidate_level(struct amdgpu_device *adev,
1176                                        struct amdgpu_vm *vm,
1177                                        struct amdgpu_vm_pt *parent,
1178                                        unsigned level)
1179 {
1180         unsigned pt_idx, num_entries;
1181
1182         /*
1183          * Recurse into the subdirectories. This recursion is harmless because
1184          * we only have a maximum of 5 layers.
1185          */
1186         num_entries = amdgpu_vm_num_entries(adev, level);
1187         for (pt_idx = 0; pt_idx < num_entries; ++pt_idx) {
1188                 struct amdgpu_vm_pt *entry = &parent->entries[pt_idx];
1189
1190                 if (!entry->base.bo)
1191                         continue;
1192
1193                 if (!entry->base.moved)
1194                         amdgpu_vm_bo_relocated(&entry->base);
1195                 amdgpu_vm_invalidate_level(adev, vm, entry, level + 1);
1196         }
1197 }
1198
1199 /*
1200  * amdgpu_vm_update_directories - make sure that all directories are valid
1201  *
1202  * @adev: amdgpu_device pointer
1203  * @vm: requested vm
1204  *
1205  * Makes sure all directories are up to date.
1206  *
1207  * Returns:
1208  * 0 for success, error for failure.
1209  */
1210 int amdgpu_vm_update_directories(struct amdgpu_device *adev,
1211                                  struct amdgpu_vm *vm)
1212 {
1213         struct amdgpu_pte_update_params params;
1214         struct amdgpu_job *job;
1215         unsigned ndw = 0;
1216         int r = 0;
1217
1218         if (list_empty(&vm->relocated))
1219                 return 0;
1220
1221 restart:
1222         memset(&params, 0, sizeof(params));
1223         params.adev = adev;
1224
1225         if (vm->use_cpu_for_update) {
1226                 r = amdgpu_vm_wait_pd(adev, vm, AMDGPU_FENCE_OWNER_VM);
1227                 if (unlikely(r))
1228                         return r;
1229
1230                 params.func = amdgpu_vm_cpu_set_ptes;
1231         } else {
1232                 ndw = 512 * 8;
1233                 r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
1234                 if (r)
1235                         return r;
1236
1237                 params.ib = &job->ibs[0];
1238                 params.func = amdgpu_vm_do_set_ptes;
1239         }
1240
1241         while (!list_empty(&vm->relocated)) {
1242                 struct amdgpu_vm_pt *pt, *entry;
1243
1244                 entry = list_first_entry(&vm->relocated, struct amdgpu_vm_pt,
1245                                          base.vm_status);
1246                 amdgpu_vm_bo_idle(&entry->base);
1247
1248                 pt = amdgpu_vm_pt_parent(entry);
1249                 if (!pt)
1250                         continue;
1251
1252                 amdgpu_vm_update_pde(&params, vm, pt, entry);
1253
1254                 if (!vm->use_cpu_for_update &&
1255                     (ndw - params.ib->length_dw) < 32)
1256                         break;
1257         }
1258
1259         if (vm->use_cpu_for_update) {
1260                 /* Flush HDP */
1261                 mb();
1262                 amdgpu_asic_flush_hdp(adev, NULL);
1263         } else if (params.ib->length_dw == 0) {
1264                 amdgpu_job_free(job);
1265         } else {
1266                 struct amdgpu_bo *root = vm->root.base.bo;
1267                 struct amdgpu_ring *ring;
1268                 struct dma_fence *fence;
1269
1270                 ring = container_of(vm->entity.rq->sched, struct amdgpu_ring,
1271                                     sched);
1272
1273                 amdgpu_ring_pad_ib(ring, params.ib);
1274                 amdgpu_sync_resv(adev, &job->sync, root->tbo.resv,
1275                                  AMDGPU_FENCE_OWNER_VM, false);
1276                 WARN_ON(params.ib->length_dw > ndw);
1277                 r = amdgpu_job_submit(job, &vm->entity, AMDGPU_FENCE_OWNER_VM,
1278                                       &fence);
1279                 if (r)
1280                         goto error;
1281
1282                 amdgpu_bo_fence(root, fence, true);
1283                 dma_fence_put(vm->last_update);
1284                 vm->last_update = fence;
1285         }
1286
1287         if (!list_empty(&vm->relocated))
1288                 goto restart;
1289
1290         return 0;
1291
1292 error:
1293         amdgpu_vm_invalidate_level(adev, vm, &vm->root,
1294                                    adev->vm_manager.root_level);
1295         amdgpu_job_free(job);
1296         return r;
1297 }
1298
1299 /**
1300  * amdgpu_vm_get_entry - find the entry for an address
1301  *
1302  * @p: see amdgpu_pte_update_params definition
1303  * @addr: virtual address in question
1304  * @entry: resulting entry or NULL
1305  * @parent: parent entry
1306  *
1307  * Find the vm_pt entry and its parent for the given address.
1308  */
1309 void amdgpu_vm_get_entry(struct amdgpu_pte_update_params *p, uint64_t addr,
1310                          struct amdgpu_vm_pt **entry,
1311                          struct amdgpu_vm_pt **parent)
1312 {
1313         unsigned level = p->adev->vm_manager.root_level;
1314
1315         *parent = NULL;
1316         *entry = &p->vm->root;
1317         while ((*entry)->entries) {
1318                 unsigned shift = amdgpu_vm_level_shift(p->adev, level++);
1319
1320                 *parent = *entry;
1321                 *entry = &(*entry)->entries[addr >> shift];
1322                 addr &= (1ULL << shift) - 1;
1323         }
1324
1325         if (level != AMDGPU_VM_PTB)
1326                 *entry = NULL;
1327 }
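/*
 * Illustrative walk (assumes a four level configuration with 9-bit indices):
 * the GPU pfn is consumed level by level using amdgpu_vm_level_shift(), e.g.
 *
 *   PDB2 index =  pfn >> 27
 *   PDB1 index = (pfn >> 18) & 0x1ff
 *   PDB0 index = (pfn >>  9) & 0x1ff
 *   PTB  index =   pfn       & 0x1ff
 *
 * The walk sets *entry to NULL when it stops above the PTB level, i.e. when
 * the requested range was never allocated down to a page table.
 */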
1328
1329 /**
1330  * amdgpu_vm_handle_huge_pages - handle updating the PD with huge pages
1331  *
1332  * @p: see amdgpu_pte_update_params definition
1333  * @entry: vm_pt entry to check
1334  * @parent: parent entry
1335  * @nptes: number of PTEs updated with this operation
1336  * @dst: destination address where the PTEs should point to
1337  * @flags: access flags for the PTEs
1338  *
1339  * Check if we can update the PD with a huge page.
1340  */
1341 static void amdgpu_vm_handle_huge_pages(struct amdgpu_pte_update_params *p,
1342                                         struct amdgpu_vm_pt *entry,
1343                                         struct amdgpu_vm_pt *parent,
1344                                         unsigned nptes, uint64_t dst,
1345                                         uint64_t flags)
1346 {
1347         uint64_t pde;
1348
1349         /* In the case of a mixed PT, the PDE must point to it */
1350         if (p->adev->asic_type >= CHIP_VEGA10 && !p->src &&
1351             nptes == AMDGPU_VM_PTE_COUNT(p->adev)) {
1352                 /* Set the huge page flag to stop scanning at this PDE */
1353                 flags |= AMDGPU_PDE_PTE;
1354         }
1355
1356         if (!(flags & AMDGPU_PDE_PTE)) {
1357                 if (entry->huge) {
1358                         /* Add the entry to the relocated list to update it. */
1359                         entry->huge = false;
1360                         amdgpu_vm_bo_relocated(&entry->base);
1361                 }
1362                 return;
1363         }
1364
1365         entry->huge = true;
1366         amdgpu_gmc_get_vm_pde(p->adev, AMDGPU_VM_PDB0, &dst, &flags);
1367
1368         pde = (entry - parent->entries) * 8;
1369         amdgpu_vm_update_func(p, parent->base.bo, pde, dst, 1, 0, flags);
1370 }
1371
1372 /**
1373  * amdgpu_vm_update_ptes - make sure that page tables are valid
1374  *
1375  * @params: see amdgpu_pte_update_params definition
1376  * @start: start of GPU address range
1377  * @end: end of GPU address range
1378  * @dst: destination address to map to, the next dst inside the function
1379  * @flags: mapping flags
1380  *
1381  * Update the page tables in the range @start - @end.
1382  *
1383  * Returns:
1384  * 0 for success, -EINVAL for failure.
1385  */
1386 static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
1387                                   uint64_t start, uint64_t end,
1388                                   uint64_t dst, uint64_t flags)
1389 {
1390         struct amdgpu_device *adev = params->adev;
1391         const uint64_t mask = AMDGPU_VM_PTE_COUNT(adev) - 1;
1392
1393         uint64_t addr, pe_start;
1394         struct amdgpu_bo *pt;
1395         unsigned nptes;
1396
1397         /* walk over the address space and update the page tables */
1398         for (addr = start; addr < end; addr += nptes,
1399              dst += nptes * AMDGPU_GPU_PAGE_SIZE) {
1400                 struct amdgpu_vm_pt *entry, *parent;
1401
1402                 amdgpu_vm_get_entry(params, addr, &entry, &parent);
1403                 if (!entry)
1404                         return -ENOENT;
1405
1406                 if ((addr & ~mask) == (end & ~mask))
1407                         nptes = end - addr;
1408                 else
1409                         nptes = AMDGPU_VM_PTE_COUNT(adev) - (addr & mask);
1410
1411                 amdgpu_vm_handle_huge_pages(params, entry, parent,
1412                                             nptes, dst, flags);
1413                 /* We don't need to update PTEs for huge pages */
1414                 if (entry->huge)
1415                         continue;
1416
1417                 pt = entry->base.bo;
1418                 pe_start = (addr & mask) * 8;
1419                 amdgpu_vm_update_func(params, pt, pe_start, dst, nptes,
1420                                       AMDGPU_GPU_PAGE_SIZE, flags);
1421
1422         }
1423
1424         return 0;
1425 }
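/*
 * Worked example for the walk above (illustrative; assumes 512 PTEs per page
 * table): updating pfns [0x1f0, 0x410) is split at page table boundaries
 * into three chunks.
 *
 *   addr = 0x1f0: nptes = 512 - (0x1f0 & 0x1ff) = 16   -> [0x1f0, 0x200)
 *   addr = 0x200: nptes = 512 - (0x200 & 0x1ff) = 512  -> [0x200, 0x400)
 *   addr = 0x400: same PT as the end, nptes = 0x410 - 0x400 = 0x10 -> [0x400, 0x410)
 */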
1426
1427 /*
1428  * amdgpu_vm_frag_ptes - add fragment information to PTEs
1429  *
1430  * @params: see amdgpu_pte_update_params definition
1431  * @vm: requested vm
1432  * @start: first PTE to handle
1433  * @end: last PTE to handle
1434  * @dst: addr those PTEs should point to
1435  * @flags: hw mapping flags
1436  *
1437  * Returns:
1438  * 0 for success, -EINVAL for failure.
1439  */
1440 static int amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params  *params,
1441                                 uint64_t start, uint64_t end,
1442                                 uint64_t dst, uint64_t flags)
1443 {
1444         /**
1445          * The MC L1 TLB supports variable sized pages, based on a fragment
1446          * field in the PTE. When this field is set to a non-zero value, page
1447          * granularity is increased from 4KB to (1 << (12 + frag)). The PTE
1448          * flags are considered valid for all PTEs within the fragment range
1449          * and corresponding mappings are assumed to be physically contiguous.
1450          *
1451          * The L1 TLB can store a single PTE for the whole fragment,
1452          * significantly increasing the space available for translation
1453          * caching. This leads to large improvements in throughput when the
1454          * TLB is under pressure.
1455          *
1456          * The L2 TLB distributes small and large fragments into two
1457          * asymmetric partitions. The large fragment cache is significantly
1458          * larger. Thus, we try to use large fragments wherever possible.
1459          * Userspace can support this by aligning virtual base address and
1460          * allocation size to the fragment size.
1461          */
1462         unsigned max_frag = params->adev->vm_manager.fragment_size;
1463         int r;
1464
1465         /* system pages are not contiguous */
1466         if (params->src || !(flags & AMDGPU_PTE_VALID))
1467                 return amdgpu_vm_update_ptes(params, start, end, dst, flags);
1468
1469         while (start != end) {
1470                 uint64_t frag_flags, frag_end;
1471                 unsigned frag;
1472
1473                 /* This intentionally wraps around if no bit is set */
1474                 frag = min((unsigned)ffs(start) - 1,
1475                            (unsigned)fls64(end - start) - 1);
1476                 if (frag >= max_frag) {
1477                         frag_flags = AMDGPU_PTE_FRAG(max_frag);
1478                         frag_end = end & ~((1ULL << max_frag) - 1);
1479                 } else {
1480                         frag_flags = AMDGPU_PTE_FRAG(frag);
1481                         frag_end = start + (1 << frag);
1482                 }
1483
1484                 r = amdgpu_vm_update_ptes(params, start, frag_end, dst,
1485                                           flags | frag_flags);
1486                 if (r)
1487                         return r;
1488
1489                 dst += (frag_end - start) * AMDGPU_GPU_PAGE_SIZE;
1490                 start = frag_end;
1491         }
1492
1493         return 0;
1494 }
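/*
 * Worked example for the fragment selection above (illustrative; assumes
 * max_frag = 9, i.e. up to 2 MiB fragments): updating pfns [0x100, 0x400)
 * takes two iterations.
 *
 *   1) start = 0x100: frag = min(ffs(0x100) - 1, fls64(0x300) - 1)
 *      = min(8, 9) = 8, so [0x100, 0x200) gets a 1 MiB fragment hint
 *      (frag_end = start + (1 << 8)).
 *   2) start = 0x200: frag = min(9, 9) = 9 >= max_frag, so [0x200, 0x400)
 *      gets the maximum 2 MiB fragment hint
 *      (frag_end = end & ~((1ULL << 9) - 1)).
 */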
1495
1496 /**
1497  * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
1498  *
1499  * @adev: amdgpu_device pointer
1500  * @exclusive: fence we need to sync to
1501  * @pages_addr: DMA addresses to use for mapping
1502  * @vm: requested vm
1503  * @start: start of mapped range
1504  * @last: last mapped entry
1505  * @flags: flags for the entries
1506  * @addr: addr to set the area to
1507  * @fence: optional resulting fence
1508  *
1509  * Fill in the page table entries between @start and @last.
1510  *
1511  * Returns:
1512  * 0 for success, -EINVAL for failure.
1513  */
1514 static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
1515                                        struct dma_fence *exclusive,
1516                                        dma_addr_t *pages_addr,
1517                                        struct amdgpu_vm *vm,
1518                                        uint64_t start, uint64_t last,
1519                                        uint64_t flags, uint64_t addr,
1520                                        struct dma_fence **fence)
1521 {
1522         struct amdgpu_ring *ring;
1523         void *owner = AMDGPU_FENCE_OWNER_VM;
1524         unsigned nptes, ncmds, ndw;
1525         struct amdgpu_job *job;
1526         struct amdgpu_pte_update_params params;
1527         struct dma_fence *f = NULL;
1528         int r;
1529
1530         memset(&params, 0, sizeof(params));
1531         params.adev = adev;
1532         params.vm = vm;
1533
1534         /* sync to everything on unmapping */
1535         if (!(flags & AMDGPU_PTE_VALID))
1536                 owner = AMDGPU_FENCE_OWNER_UNDEFINED;
1537
1538         if (vm->use_cpu_for_update) {
1539                 /* params.src is used as a flag to indicate system memory */
1540                 if (pages_addr)
1541                         params.src = ~0;
1542
1543                 /* Wait for PT BOs to be free. PTs share the same resv. object
1544                  * as the root PD BO
1545                  */
1546                 r = amdgpu_vm_wait_pd(adev, vm, owner);
1547                 if (unlikely(r))
1548                         return r;
1549
1550                 params.func = amdgpu_vm_cpu_set_ptes;
1551                 params.pages_addr = pages_addr;
1552                 return amdgpu_vm_frag_ptes(&params, start, last + 1,
1553                                            addr, flags);
1554         }
1555
1556         ring = container_of(vm->entity.rq->sched, struct amdgpu_ring, sched);
1557
1558         nptes = last - start + 1;
1559
1560         /*
1561          * reserve space for two commands every (1 << BLOCK_SIZE)
1562          *  entries or 2k dwords (whichever is smaller)
1563          *
1564          * The second command is for the shadow pagetables.
1565          */
1566         if (vm->root.base.bo->shadow)
1567                 ncmds = ((nptes >> min(adev->vm_manager.block_size, 11u)) + 1) * 2;
1568         else
1569                 ncmds = ((nptes >> min(adev->vm_manager.block_size, 11u)) + 1);
1570
1571         /* padding, etc. */
1572         ndw = 64;
1573
1574         if (pages_addr) {
1575                 /* copy commands needed */
1576                 ndw += ncmds * adev->vm_manager.vm_pte_funcs->copy_pte_num_dw;
1577
1578                 /* and also PTEs */
1579                 ndw += nptes * 2;
1580
1581                 params.func = amdgpu_vm_do_copy_ptes;
1582
1583         } else {
1584                 /* set page commands needed */
1585                 ndw += ncmds * 10;
1586
1587                 /* extra commands for begin/end fragments */
1588                 if (vm->root.base.bo->shadow)
1589                         ndw += 2 * 10 * adev->vm_manager.fragment_size * 2;
1590                 else
1591                         ndw += 2 * 10 * adev->vm_manager.fragment_size;
1592
1593                 params.func = amdgpu_vm_do_set_ptes;
1594         }
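             /* Rough sizing sketch with assumed numbers: for block_size == 9,
              * fragment_size == 9, nptes == 1000 and a shadow root PD this is
              * ncmds == ((1000 >> 9) + 1) * 2 == 4 and, on the set_ptes path,
              * ndw == 64 + 4 * 10 + 2 * 10 * 9 * 2 == 464 dwords, i.e. an IB
              * of 464 * 4 == 1856 bytes allocated below.
              */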
1595
1596         r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
1597         if (r)
1598                 return r;
1599
1600         params.ib = &job->ibs[0];
1601
1602         if (pages_addr) {
1603                 uint64_t *pte;
1604                 unsigned i;
1605
1606                 /* Put the PTEs at the end of the IB. */
1607                 i = ndw - nptes * 2;
1608                 pte = (uint64_t *)&(job->ibs->ptr[i]);
1609                 params.src = job->ibs->gpu_addr + i * 4;
1610
1611                 for (i = 0; i < nptes; ++i) {
1612                         pte[i] = amdgpu_vm_map_gart(pages_addr, addr + i *
1613                                                     AMDGPU_GPU_PAGE_SIZE);
1614                         pte[i] |= flags;
1615                 }
1616                 addr = 0;
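                     /* The last nptes * 2 dwords of the IB now hold the PTE
                      * values and params.src points at their GPU address, so
                      * the copy path reads the PTEs straight out of the IB;
                      * addr is zeroed because it is unused on this path.
                      */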
1617         }
1618
1619         r = amdgpu_sync_fence(adev, &job->sync, exclusive, false);
1620         if (r)
1621                 goto error_free;
1622
1623         r = amdgpu_sync_resv(adev, &job->sync, vm->root.base.bo->tbo.resv,
1624                              owner, false);
1625         if (r)
1626                 goto error_free;
1627
1628         r = reservation_object_reserve_shared(vm->root.base.bo->tbo.resv);
1629         if (r)
1630                 goto error_free;
1631
1632         r = amdgpu_vm_frag_ptes(&params, start, last + 1, addr, flags);
1633         if (r)
1634                 goto error_free;
1635
1636         amdgpu_ring_pad_ib(ring, params.ib);
1637         WARN_ON(params.ib->length_dw > ndw);
1638         r = amdgpu_job_submit(job, &vm->entity, AMDGPU_FENCE_OWNER_VM, &f);
1639         if (r)
1640                 goto error_free;
1641
1642         amdgpu_bo_fence(vm->root.base.bo, f, true);
1643         dma_fence_put(*fence);
1644         *fence = f;
1645         return 0;
1646
1647 error_free:
1648         amdgpu_job_free(job);
1649         return r;
1650 }
1651
1652 /**
1653  * amdgpu_vm_bo_split_mapping - split a mapping into smaller chunks
1654  *
1655  * @adev: amdgpu_device pointer
1656  * @exclusive: fence we need to sync to
1657  * @pages_addr: DMA addresses to use for mapping
1658  * @vm: requested vm
1659  * @mapping: mapped range and flags to use for the update
1660  * @flags: HW flags for the mapping
1661  * @nodes: array of drm_mm_nodes with the MC addresses
1662  * @fence: optional resulting fence
1663  *
1664  * Split the mapping into smaller chunks so that each update fits
1665  * into a SDMA IB.
1666  *
1667  * Returns:
1668  * 0 for success, -EINVAL for failure.
1669  */
1670 static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
1671                                       struct dma_fence *exclusive,
1672                                       dma_addr_t *pages_addr,
1673                                       struct amdgpu_vm *vm,
1674                                       struct amdgpu_bo_va_mapping *mapping,
1675                                       uint64_t flags,
1676                                       struct drm_mm_node *nodes,
1677                                       struct dma_fence **fence)
1678 {
1679         unsigned min_linear_pages = 1 << adev->vm_manager.fragment_size;
1680         uint64_t pfn, start = mapping->start;
1681         int r;
1682
1683         /* Normally bo_va->flags only contains the READABLE and WRITEABLE bits,
1684          * but just to be safe we filter the flags here first.
1685          */
1686         if (!(mapping->flags & AMDGPU_PTE_READABLE))
1687                 flags &= ~AMDGPU_PTE_READABLE;
1688         if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
1689                 flags &= ~AMDGPU_PTE_WRITEABLE;
1690
1691         flags &= ~AMDGPU_PTE_EXECUTABLE;
1692         flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
1693
1694         flags &= ~AMDGPU_PTE_MTYPE_MASK;
1695         flags |= (mapping->flags & AMDGPU_PTE_MTYPE_MASK);
1696
1697         if ((mapping->flags & AMDGPU_PTE_PRT) &&
1698             (adev->asic_type >= CHIP_VEGA10)) {
1699                 flags |= AMDGPU_PTE_PRT;
1700                 flags &= ~AMDGPU_PTE_VALID;
1701         }
1702
1703         trace_amdgpu_vm_bo_update(mapping);
1704
1705         pfn = mapping->offset >> PAGE_SHIFT;
1706         if (nodes) {
1707                 while (pfn >= nodes->size) {
1708                         pfn -= nodes->size;
1709                         ++nodes;
1710                 }
1711         }
1712
1713         do {
1714                 dma_addr_t *dma_addr = NULL;
1715                 uint64_t max_entries;
1716                 uint64_t addr, last;
1717
1718                 if (nodes) {
1719                         addr = nodes->start << PAGE_SHIFT;
1720                         max_entries = (nodes->size - pfn) *
1721                                 AMDGPU_GPU_PAGES_IN_CPU_PAGE;
1722                 } else {
1723                         addr = 0;
1724                         max_entries = S64_MAX;
1725                 }
1726
1727                 if (pages_addr) {
1728                         uint64_t count;
1729
1730                         max_entries = min(max_entries, 16ull * 1024ull);
1731                         for (count = 1;
1732                              count < max_entries / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
1733                              ++count) {
1734                                 uint64_t idx = pfn + count;
1735
1736                                 if (pages_addr[idx] !=
1737                                     (pages_addr[idx - 1] + PAGE_SIZE))
1738                                         break;
1739                         }
1740
1741                         if (count < min_linear_pages) {
1742                                 addr = pfn << PAGE_SHIFT;
1743                                 dma_addr = pages_addr;
1744                         } else {
1745                                 addr = pages_addr[pfn];
1746                                 max_entries = count * AMDGPU_GPU_PAGES_IN_CPU_PAGE;
1747                         }
1748
1749                 } else if (flags & AMDGPU_PTE_VALID) {
1750                         addr += adev->vm_manager.vram_base_offset;
1751                         addr += pfn << PAGE_SHIFT;
1752                 }
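                     /* Heuristic recap (fragment_size == 9 assumed, so
                      * min_linear_pages == 512): if fewer than 512 system
                      * pages starting at pfn are physically contiguous we fall
                      * back to per-page translation through pages_addr,
                      * otherwise the contiguous run is mapped directly and
                      * max_entries is clamped to its length.
                      */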
1753
1754                 last = min((uint64_t)mapping->last, start + max_entries - 1);
1755                 r = amdgpu_vm_bo_update_mapping(adev, exclusive, dma_addr, vm,
1756                                                 start, last, flags, addr,
1757                                                 fence);
1758                 if (r)
1759                         return r;
1760
1761                 pfn += (last - start + 1) / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
1762                 if (nodes && nodes->size == pfn) {
1763                         pfn = 0;
1764                         ++nodes;
1765                 }
1766                 start = last + 1;
1767
1768         } while (unlikely(start != mapping->last + 1));
1769
1770         return 0;
1771 }
1772
1773 /**
1774  * amdgpu_vm_bo_update - update all BO mappings in the vm page table
1775  *
1776  * @adev: amdgpu_device pointer
1777  * @bo_va: requested BO and VM object
1778  * @clear: if true clear the entries
1779  *
1780  * Fill in the page table entries for @bo_va.
1781  *
1782  * Returns:
1783  * 0 for success, -EINVAL for failure.
1784  */
1785 int amdgpu_vm_bo_update(struct amdgpu_device *adev,
1786                         struct amdgpu_bo_va *bo_va,
1787                         bool clear)
1788 {
1789         struct amdgpu_bo *bo = bo_va->base.bo;
1790         struct amdgpu_vm *vm = bo_va->base.vm;
1791         struct amdgpu_bo_va_mapping *mapping;
1792         dma_addr_t *pages_addr = NULL;
1793         struct ttm_mem_reg *mem;
1794         struct drm_mm_node *nodes;
1795         struct dma_fence *exclusive, **last_update;
1796         uint64_t flags;
1797         int r;
1798
1799         if (clear || !bo) {
1800                 mem = NULL;
1801                 nodes = NULL;
1802                 exclusive = NULL;
1803         } else {
1804                 struct ttm_dma_tt *ttm;
1805
1806                 mem = &bo->tbo.mem;
1807                 nodes = mem->mm_node;
1808                 if (mem->mem_type == TTM_PL_TT) {
1809                         ttm = container_of(bo->tbo.ttm, struct ttm_dma_tt, ttm);
1810                         pages_addr = ttm->dma_address;
1811                 }
1812                 exclusive = reservation_object_get_excl(bo->tbo.resv);
1813         }
1814
1815         if (bo)
1816                 flags = amdgpu_ttm_tt_pte_flags(adev, bo->tbo.ttm, mem);
1817         else
1818                 flags = 0x0;
1819
1820         if (clear || (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv))
1821                 last_update = &vm->last_update;
1822         else
1823                 last_update = &bo_va->last_pt_update;
1824
1825         if (!clear && bo_va->base.moved) {
1826                 bo_va->base.moved = false;
1827                 list_splice_init(&bo_va->valids, &bo_va->invalids);
1828
1829         } else if (bo_va->cleared != clear) {
1830                 list_splice_init(&bo_va->valids, &bo_va->invalids);
1831         }
1832
1833         list_for_each_entry(mapping, &bo_va->invalids, list) {
1834                 r = amdgpu_vm_bo_split_mapping(adev, exclusive, pages_addr, vm,
1835                                                mapping, flags, nodes,
1836                                                last_update);
1837                 if (r)
1838                         return r;
1839         }
1840
1841         if (vm->use_cpu_for_update) {
1842                 /* Flush HDP */
1843                 mb();
1844                 amdgpu_asic_flush_hdp(adev, NULL);
1845         }
1846
1847         /* If the BO is not in its preferred location add it back to
1848          * the evicted list so that it gets validated again on the
1849          * next command submission.
1850          */
1851         if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv) {
1852                 uint32_t mem_type = bo->tbo.mem.mem_type;
1853
1854                 if (!(bo->preferred_domains & amdgpu_mem_type_to_domain(mem_type)))
1855                         amdgpu_vm_bo_evicted(&bo_va->base);
1856                 else
1857                         amdgpu_vm_bo_idle(&bo_va->base);
1858         } else {
1859                 amdgpu_vm_bo_done(&bo_va->base);
1860         }
1861
1862         list_splice_init(&bo_va->invalids, &bo_va->valids);
1863         bo_va->cleared = clear;
1864
1865         if (trace_amdgpu_vm_bo_mapping_enabled()) {
1866                 list_for_each_entry(mapping, &bo_va->valids, list)
1867                         trace_amdgpu_vm_bo_mapping(mapping);
1868         }
1869
1870         return 0;
1871 }
1872
1873 /**
1874  * amdgpu_vm_update_prt_state - update the global PRT state
1875  *
1876  * @adev: amdgpu_device pointer
1877  */
1878 static void amdgpu_vm_update_prt_state(struct amdgpu_device *adev)
1879 {
1880         unsigned long flags;
1881         bool enable;
1882
1883         spin_lock_irqsave(&adev->vm_manager.prt_lock, flags);
1884         enable = !!atomic_read(&adev->vm_manager.num_prt_users);
1885         adev->gmc.gmc_funcs->set_prt(adev, enable);
1886         spin_unlock_irqrestore(&adev->vm_manager.prt_lock, flags);
1887 }
1888
1889 /**
1890  * amdgpu_vm_prt_get - add a PRT user
1891  *
1892  * @adev: amdgpu_device pointer
1893  */
1894 static void amdgpu_vm_prt_get(struct amdgpu_device *adev)
1895 {
1896         if (!adev->gmc.gmc_funcs->set_prt)
1897                 return;
1898
1899         if (atomic_inc_return(&adev->vm_manager.num_prt_users) == 1)
1900                 amdgpu_vm_update_prt_state(adev);
1901 }
1902
1903 /**
1904  * amdgpu_vm_prt_put - drop a PRT user
1905  *
1906  * @adev: amdgpu_device pointer
1907  */
1908 static void amdgpu_vm_prt_put(struct amdgpu_device *adev)
1909 {
1910         if (atomic_dec_return(&adev->vm_manager.num_prt_users) == 0)
1911                 amdgpu_vm_update_prt_state(adev);
1912 }
1913
1914 /**
1915  * amdgpu_vm_prt_cb - callback for updating the PRT status
1916  *
1917  * @fence: fence for the callback
1918  * @_cb: the callback function
1919  */
1920 static void amdgpu_vm_prt_cb(struct dma_fence *fence, struct dma_fence_cb *_cb)
1921 {
1922         struct amdgpu_prt_cb *cb = container_of(_cb, struct amdgpu_prt_cb, cb);
1923
1924         amdgpu_vm_prt_put(cb->adev);
1925         kfree(cb);
1926 }
1927
1928 /**
1929  * amdgpu_vm_add_prt_cb - add callback for updating the PRT status
1930  *
1931  * @adev: amdgpu_device pointer
1932  * @fence: fence for the callback
1933  */
1934 static void amdgpu_vm_add_prt_cb(struct amdgpu_device *adev,
1935                                  struct dma_fence *fence)
1936 {
1937         struct amdgpu_prt_cb *cb;
1938
1939         if (!adev->gmc.gmc_funcs->set_prt)
1940                 return;
1941
1942         cb = kmalloc(sizeof(struct amdgpu_prt_cb), GFP_KERNEL);
1943         if (!cb) {
1944                 /* Last resort when we are OOM */
1945                 if (fence)
1946                         dma_fence_wait(fence, false);
1947
1948                 amdgpu_vm_prt_put(adev);
1949         } else {
1950                 cb->adev = adev;
1951                 if (!fence || dma_fence_add_callback(fence, &cb->cb,
1952                                                      amdgpu_vm_prt_cb))
1953                         amdgpu_vm_prt_cb(fence, &cb->cb);
1954         }
1955 }
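     /* PRT bookkeeping in short: every PRT mapping takes a reference via
      * amdgpu_vm_prt_get(), and amdgpu_vm_add_prt_cb() defers the matching
      * amdgpu_vm_prt_put() until the unmap fence signals, so global PRT
      * support is only switched off once the last PRT mapping is really gone
      * on the hardware.
      */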
1956
1957 /**
1958  * amdgpu_vm_free_mapping - free a mapping
1959  *
1960  * @adev: amdgpu_device pointer
1961  * @vm: requested vm
1962  * @mapping: mapping to be freed
1963  * @fence: fence of the unmap operation
1964  *
1965  * Free a mapping and make sure we decrease the PRT usage count if applicable.
1966  */
1967 static void amdgpu_vm_free_mapping(struct amdgpu_device *adev,
1968                                    struct amdgpu_vm *vm,
1969                                    struct amdgpu_bo_va_mapping *mapping,
1970                                    struct dma_fence *fence)
1971 {
1972         if (mapping->flags & AMDGPU_PTE_PRT)
1973                 amdgpu_vm_add_prt_cb(adev, fence);
1974         kfree(mapping);
1975 }
1976
1977 /**
1978  * amdgpu_vm_prt_fini - finish all prt mappings
1979  *
1980  * @adev: amdgpu_device pointer
1981  * @vm: requested vm
1982  *
1983  * Register a cleanup callback to disable PRT support after VM dies.
1984  */
1985 static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
1986 {
1987         struct reservation_object *resv = vm->root.base.bo->tbo.resv;
1988         struct dma_fence *excl, **shared;
1989         unsigned i, shared_count;
1990         int r;
1991
1992         r = reservation_object_get_fences_rcu(resv, &excl,
1993                                               &shared_count, &shared);
1994         if (r) {
1995                 /* Not enough memory to grab the fence list, as last resort
1996                  * block for all the fences to complete.
1997                  */
1998                 reservation_object_wait_timeout_rcu(resv, true, false,
1999                                                     MAX_SCHEDULE_TIMEOUT);
2000                 return;
2001         }
2002
2003         /* Add a callback for each fence in the reservation object */
2004         amdgpu_vm_prt_get(adev);
2005         amdgpu_vm_add_prt_cb(adev, excl);
2006
2007         for (i = 0; i < shared_count; ++i) {
2008                 amdgpu_vm_prt_get(adev);
2009                 amdgpu_vm_add_prt_cb(adev, shared[i]);
2010         }
2011
2012         kfree(shared);
2013 }
2014
2015 /**
2016  * amdgpu_vm_clear_freed - clear freed BOs in the PT
2017  *
2018  * @adev: amdgpu_device pointer
2019  * @vm: requested vm
2020  * @fence: optional resulting fence (unchanged if no work needed to be done
2021  * or if an error occurred)
2022  *
2023  * Make sure all freed BOs are cleared in the PT.
2024  * PTs have to be reserved and mutex must be locked!
2025  *
2026  * Returns:
2027  * 0 for success.
2028  *
2029  */
2030 int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
2031                           struct amdgpu_vm *vm,
2032                           struct dma_fence **fence)
2033 {
2034         struct amdgpu_bo_va_mapping *mapping;
2035         uint64_t init_pte_value = 0;
2036         struct dma_fence *f = NULL;
2037         int r;
2038
2039         while (!list_empty(&vm->freed)) {
2040                 mapping = list_first_entry(&vm->freed,
2041                         struct amdgpu_bo_va_mapping, list);
2042                 list_del(&mapping->list);
2043
2044                 if (vm->pte_support_ats &&
2045                     mapping->start < AMDGPU_GMC_HOLE_START)
2046                         init_pte_value = AMDGPU_PTE_DEFAULT_ATC;
2047
2048                 r = amdgpu_vm_bo_update_mapping(adev, NULL, NULL, vm,
2049                                                 mapping->start, mapping->last,
2050                                                 init_pte_value, 0, &f);
2051                 amdgpu_vm_free_mapping(adev, vm, mapping, f);
2052                 if (r) {
2053                         dma_fence_put(f);
2054                         return r;
2055                 }
2056         }
2057
2058         if (fence && f) {
2059                 dma_fence_put(*fence);
2060                 *fence = f;
2061         } else {
2062                 dma_fence_put(f);
2063         }
2064
2065         return 0;
2066
2067 }
2068
2069 /**
2070  * amdgpu_vm_handle_moved - handle moved BOs in the PT
2071  *
2072  * @adev: amdgpu_device pointer
2073  * @vm: requested vm
2074  *
2075  * Make sure all BOs which are moved are updated in the PTs.
2076  *
2077  * Returns:
2078  * 0 for success.
2079  *
2080  * PTs have to be reserved!
2081  */
2082 int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
2083                            struct amdgpu_vm *vm)
2084 {
2085         struct amdgpu_bo_va *bo_va, *tmp;
2086         struct reservation_object *resv;
2087         bool clear;
2088         int r;
2089
2090         list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) {
2091                 /* Per VM BOs never need to be cleared in the page tables */
2092                 r = amdgpu_vm_bo_update(adev, bo_va, false);
2093                 if (r)
2094                         return r;
2095         }
2096
2097         spin_lock(&vm->invalidated_lock);
2098         while (!list_empty(&vm->invalidated)) {
2099                 bo_va = list_first_entry(&vm->invalidated, struct amdgpu_bo_va,
2100                                          base.vm_status);
2101                 resv = bo_va->base.bo->tbo.resv;
2102                 spin_unlock(&vm->invalidated_lock);
2103
2104                 /* Try to reserve the BO to avoid clearing its ptes */
2105                 if (!amdgpu_vm_debug && reservation_object_trylock(resv))
2106                         clear = false;
2107                 /* Somebody else is using the BO right now */
2108                 else
2109                         clear = true;
2110
2111                 r = amdgpu_vm_bo_update(adev, bo_va, clear);
2112                 if (r)
2113                         return r;
2114
2115                 if (!clear)
2116                         reservation_object_unlock(resv);
2117                 spin_lock(&vm->invalidated_lock);
2118         }
2119         spin_unlock(&vm->invalidated_lock);
2120
2121         return 0;
2122 }
2123
2124 /**
2125  * amdgpu_vm_bo_add - add a bo to a specific vm
2126  *
2127  * @adev: amdgpu_device pointer
2128  * @vm: requested vm
2129  * @bo: amdgpu buffer object
2130  *
2131  * Add @bo into the requested vm and to the list of BOs associated
2132  * with the vm.
2133  *
2134  * Returns:
2135  * Newly added bo_va or NULL for failure
2136  *
2137  * Object has to be reserved!
2138  */
2139 struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
2140                                       struct amdgpu_vm *vm,
2141                                       struct amdgpu_bo *bo)
2142 {
2143         struct amdgpu_bo_va *bo_va;
2144
2145         bo_va = kzalloc(sizeof(struct amdgpu_bo_va), GFP_KERNEL);
2146         if (bo_va == NULL) {
2147                 return NULL;
2148         }
2149         amdgpu_vm_bo_base_init(&bo_va->base, vm, bo);
2150
2151         bo_va->ref_count = 1;
2152         INIT_LIST_HEAD(&bo_va->valids);
2153         INIT_LIST_HEAD(&bo_va->invalids);
2154
2155         return bo_va;
2156 }
2157
2158
2159 /**
2160  * amdgpu_vm_bo_insert_map - insert a new mapping
2161  *
2162  * @adev: amdgpu_device pointer
2163  * @bo_va: bo_va to store the address
2164  * @mapping: the mapping to insert
2165  *
2166  * Insert a new mapping into all structures.
2167  */
2168 static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
2169                                     struct amdgpu_bo_va *bo_va,
2170                                     struct amdgpu_bo_va_mapping *mapping)
2171 {
2172         struct amdgpu_vm *vm = bo_va->base.vm;
2173         struct amdgpu_bo *bo = bo_va->base.bo;
2174
2175         mapping->bo_va = bo_va;
2176         list_add(&mapping->list, &bo_va->invalids);
2177         amdgpu_vm_it_insert(mapping, &vm->va);
2178
2179         if (mapping->flags & AMDGPU_PTE_PRT)
2180                 amdgpu_vm_prt_get(adev);
2181
2182         if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv &&
2183             !bo_va->base.moved) {
2184                 list_move(&bo_va->base.vm_status, &vm->moved);
2185         }
2186         trace_amdgpu_vm_bo_map(bo_va, mapping);
2187 }
2188
2189 /**
2190  * amdgpu_vm_bo_map - map bo inside a vm
2191  *
2192  * @adev: amdgpu_device pointer
2193  * @bo_va: bo_va to store the address
2194  * @saddr: where to map the BO
2195  * @offset: requested offset in the BO
2196  * @size: BO size in bytes
2197  * @flags: attributes of pages (read/write/valid/etc.)
2198  *
2199  * Add a mapping of the BO at the specified address into the VM.
2200  *
2201  * Returns:
2202  * 0 for success, error for failure.
2203  *
2204  * Object has to be reserved and unreserved outside!
2205  */
2206 int amdgpu_vm_bo_map(struct amdgpu_device *adev,
2207                      struct amdgpu_bo_va *bo_va,
2208                      uint64_t saddr, uint64_t offset,
2209                      uint64_t size, uint64_t flags)
2210 {
2211         struct amdgpu_bo_va_mapping *mapping, *tmp;
2212         struct amdgpu_bo *bo = bo_va->base.bo;
2213         struct amdgpu_vm *vm = bo_va->base.vm;
2214         uint64_t eaddr;
2215
2216         /* validate the parameters */
2217         if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
2218             size == 0 || size & AMDGPU_GPU_PAGE_MASK)
2219                 return -EINVAL;
2220
2221         /* make sure object fit at this offset */
2222         eaddr = saddr + size - 1;
2223         if (saddr >= eaddr ||
2224             (bo && offset + size > amdgpu_bo_size(bo)))
2225                 return -EINVAL;
2226
2227         saddr /= AMDGPU_GPU_PAGE_SIZE;
2228         eaddr /= AMDGPU_GPU_PAGE_SIZE;
2229
2230         tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
2231         if (tmp) {
2232                 /* bo and tmp overlap, invalid addr */
2233                 dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
2234                         "0x%010Lx-0x%010Lx\n", bo, saddr, eaddr,
2235                         tmp->start, tmp->last + 1);
2236                 return -EINVAL;
2237         }
2238
2239         mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
2240         if (!mapping)
2241                 return -ENOMEM;
2242
2243         mapping->start = saddr;
2244         mapping->last = eaddr;
2245         mapping->offset = offset;
2246         mapping->flags = flags;
2247
2248         amdgpu_vm_bo_insert_map(adev, bo_va, mapping);
2249
2250         return 0;
2251 }
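     /* Minimal usage sketch (addresses and flags are only illustrative):
      *
      *     r = amdgpu_vm_bo_map(adev, bo_va, 0x100000, 0, amdgpu_bo_size(bo),
      *                          AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE);
      *
      * saddr, offset and size must all be GPU page aligned and the range must
      * not overlap an existing mapping, otherwise -EINVAL is returned.
      */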
2252
2253 /**
2254  * amdgpu_vm_bo_replace_map - map bo inside a vm, replacing existing mappings
2255  *
2256  * @adev: amdgpu_device pointer
2257  * @bo_va: bo_va to store the address
2258  * @saddr: where to map the BO
2259  * @offset: requested offset in the BO
2260  * @size: BO size in bytes
2261  * @flags: attributes of pages (read/write/valid/etc.)
2262  *
2263  * Add a mapping of the BO at the specified address into the VM. Replace existing
2264  * mappings as we do so.
2265  *
2266  * Returns:
2267  * 0 for success, error for failure.
2268  *
2269  * Object has to be reserved and unreserved outside!
2270  */
2271 int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
2272                              struct amdgpu_bo_va *bo_va,
2273                              uint64_t saddr, uint64_t offset,
2274                              uint64_t size, uint64_t flags)
2275 {
2276         struct amdgpu_bo_va_mapping *mapping;
2277         struct amdgpu_bo *bo = bo_va->base.bo;
2278         uint64_t eaddr;
2279         int r;
2280
2281         /* validate the parameters */
2282         if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
2283             size == 0 || size & AMDGPU_GPU_PAGE_MASK)
2284                 return -EINVAL;
2285
2286         /* make sure object fit at this offset */
2287         eaddr = saddr + size - 1;
2288         if (saddr >= eaddr ||
2289             (bo && offset + size > amdgpu_bo_size(bo)))
2290                 return -EINVAL;
2291
2292         /* Allocate all the needed memory */
2293         mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
2294         if (!mapping)
2295                 return -ENOMEM;
2296
2297         r = amdgpu_vm_bo_clear_mappings(adev, bo_va->base.vm, saddr, size);
2298         if (r) {
2299                 kfree(mapping);
2300                 return r;
2301         }
2302
2303         saddr /= AMDGPU_GPU_PAGE_SIZE;
2304         eaddr /= AMDGPU_GPU_PAGE_SIZE;
2305
2306         mapping->start = saddr;
2307         mapping->last = eaddr;
2308         mapping->offset = offset;
2309         mapping->flags = flags;
2310
2311         amdgpu_vm_bo_insert_map(adev, bo_va, mapping);
2312
2313         return 0;
2314 }
2315
2316 /**
2317  * amdgpu_vm_bo_unmap - remove bo mapping from vm
2318  *
2319  * @adev: amdgpu_device pointer
2320  * @bo_va: bo_va to remove the address from
2321  * @saddr: where the BO is mapped
2322  *
2323  * Remove a mapping of the BO at the specified address from the VM.
2324  *
2325  * Returns:
2326  * 0 for success, error for failure.
2327  *
2328  * Object has to be reserved and unreserved outside!
2329  */
2330 int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
2331                        struct amdgpu_bo_va *bo_va,
2332                        uint64_t saddr)
2333 {
2334         struct amdgpu_bo_va_mapping *mapping;
2335         struct amdgpu_vm *vm = bo_va->base.vm;
2336         bool valid = true;
2337
2338         saddr /= AMDGPU_GPU_PAGE_SIZE;
2339
2340         list_for_each_entry(mapping, &bo_va->valids, list) {
2341                 if (mapping->start == saddr)
2342                         break;
2343         }
2344
2345         if (&mapping->list == &bo_va->valids) {
2346                 valid = false;
2347
2348                 list_for_each_entry(mapping, &bo_va->invalids, list) {
2349                         if (mapping->start == saddr)
2350                                 break;
2351                 }
2352
2353                 if (&mapping->list == &bo_va->invalids)
2354                         return -ENOENT;
2355         }
2356
2357         list_del(&mapping->list);
2358         amdgpu_vm_it_remove(mapping, &vm->va);
2359         mapping->bo_va = NULL;
2360         trace_amdgpu_vm_bo_unmap(bo_va, mapping);
2361
2362         if (valid)
2363                 list_add(&mapping->list, &vm->freed);
2364         else
2365                 amdgpu_vm_free_mapping(adev, vm, mapping,
2366                                        bo_va->last_pt_update);
2367
2368         return 0;
2369 }
2370
2371 /**
2372  * amdgpu_vm_bo_clear_mappings - remove all mappings in a specific range
2373  *
2374  * @adev: amdgpu_device pointer
2375  * @vm: VM structure to use
2376  * @saddr: start of the range
2377  * @size: size of the range
2378  *
2379  * Remove all mappings in a range, splitting them as appropriate.
2380  *
2381  * Returns:
2382  * 0 for success, error for failure.
2383  */
2384 int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
2385                                 struct amdgpu_vm *vm,
2386                                 uint64_t saddr, uint64_t size)
2387 {
2388         struct amdgpu_bo_va_mapping *before, *after, *tmp, *next;
2389         LIST_HEAD(removed);
2390         uint64_t eaddr;
2391
2392         eaddr = saddr + size - 1;
2393         saddr /= AMDGPU_GPU_PAGE_SIZE;
2394         eaddr /= AMDGPU_GPU_PAGE_SIZE;
2395
2396         /* Allocate all the needed memory */
2397         before = kzalloc(sizeof(*before), GFP_KERNEL);
2398         if (!before)
2399                 return -ENOMEM;
2400         INIT_LIST_HEAD(&before->list);
2401
2402         after = kzalloc(sizeof(*after), GFP_KERNEL);
2403         if (!after) {
2404                 kfree(before);
2405                 return -ENOMEM;
2406         }
2407         INIT_LIST_HEAD(&after->list);
2408
2409         /* Now gather all removed mappings */
2410         tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
2411         while (tmp) {
2412                 /* Remember mapping split at the start */
2413                 if (tmp->start < saddr) {
2414                         before->start = tmp->start;
2415                         before->last = saddr - 1;
2416                         before->offset = tmp->offset;
2417                         before->flags = tmp->flags;
2418                         before->bo_va = tmp->bo_va;
2419                         list_add(&before->list, &tmp->bo_va->invalids);
2420                 }
2421
2422                 /* Remember mapping split at the end */
2423                 if (tmp->last > eaddr) {
2424                         after->start = eaddr + 1;
2425                         after->last = tmp->last;
2426                         after->offset = tmp->offset;
2427                         after->offset += after->start - tmp->start;
2428                         after->flags = tmp->flags;
2429                         after->bo_va = tmp->bo_va;
2430                         list_add(&after->list, &tmp->bo_va->invalids);
2431                 }
2432
2433                 list_del(&tmp->list);
2434                 list_add(&tmp->list, &removed);
2435
2436                 tmp = amdgpu_vm_it_iter_next(tmp, saddr, eaddr);
2437         }
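             /* Illustrative split: clearing 0x2000..0x2fff out of an existing
              * mapping 0x1000..0x4fff leaves "before" covering 0x1000..0x1fff
              * and "after" covering 0x3000..0x4fff, while the original mapping
              * is clamped to the cleared range and queued on vm->freed below.
              */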
2438
2439         /* And free them up */
2440         list_for_each_entry_safe(tmp, next, &removed, list) {
2441                 amdgpu_vm_it_remove(tmp, &vm->va);
2442                 list_del(&tmp->list);
2443
2444                 if (tmp->start < saddr)
2445                         tmp->start = saddr;
2446                 if (tmp->last > eaddr)
2447                         tmp->last = eaddr;
2448
2449                 tmp->bo_va = NULL;
2450                 list_add(&tmp->list, &vm->freed);
2451                 trace_amdgpu_vm_bo_unmap(NULL, tmp);
2452         }
2453
2454         /* Insert partial mapping before the range */
2455         if (!list_empty(&before->list)) {
2456                 amdgpu_vm_it_insert(before, &vm->va);
2457                 if (before->flags & AMDGPU_PTE_PRT)
2458                         amdgpu_vm_prt_get(adev);
2459         } else {
2460                 kfree(before);
2461         }
2462
2463         /* Insert partial mapping after the range */
2464         if (!list_empty(&after->list)) {
2465                 amdgpu_vm_it_insert(after, &vm->va);
2466                 if (after->flags & AMDGPU_PTE_PRT)
2467                         amdgpu_vm_prt_get(adev);
2468         } else {
2469                 kfree(after);
2470         }
2471
2472         return 0;
2473 }
2474
2475 /**
2476  * amdgpu_vm_bo_lookup_mapping - find mapping by address
2477  *
2478  * @vm: the requested VM
2479  * @addr: the address
2480  *
2481  * Find a mapping by its address.
2482  *
2483  * Returns:
2484  * The amdgpu_bo_va_mapping matching for addr or NULL
2485  *
2486  */
2487 struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
2488                                                          uint64_t addr)
2489 {
2490         return amdgpu_vm_it_iter_first(&vm->va, addr, addr);
2491 }
2492
2493 /**
2494  * amdgpu_vm_bo_trace_cs - trace all reserved mappings
2495  *
2496  * @vm: the requested vm
2497  * @ticket: CS ticket
2498  *
2499  * Trace all mappings of BOs reserved during a command submission.
2500  */
2501 void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket)
2502 {
2503         struct amdgpu_bo_va_mapping *mapping;
2504
2505         if (!trace_amdgpu_vm_bo_cs_enabled())
2506                 return;
2507
2508         for (mapping = amdgpu_vm_it_iter_first(&vm->va, 0, U64_MAX); mapping;
2509              mapping = amdgpu_vm_it_iter_next(mapping, 0, U64_MAX)) {
2510                 if (mapping->bo_va && mapping->bo_va->base.bo) {
2511                         struct amdgpu_bo *bo;
2512
2513                         bo = mapping->bo_va->base.bo;
2514                         if (READ_ONCE(bo->tbo.resv->lock.ctx) != ticket)
2515                                 continue;
2516                 }
2517
2518                 trace_amdgpu_vm_bo_cs(mapping);
2519         }
2520 }
2521
2522 /**
2523  * amdgpu_vm_bo_rmv - remove a bo from a specific vm
2524  *
2525  * @adev: amdgpu_device pointer
2526  * @bo_va: requested bo_va
2527  *
2528  * Remove @bo_va->bo from the requested vm.
2529  *
2530  * Object has to be reserved!
2531  */
2532 void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
2533                       struct amdgpu_bo_va *bo_va)
2534 {
2535         struct amdgpu_bo_va_mapping *mapping, *next;
2536         struct amdgpu_bo *bo = bo_va->base.bo;
2537         struct amdgpu_vm *vm = bo_va->base.vm;
2538
2539         if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv)
2540                 vm->bulk_moveable = false;
2541
2542         list_del(&bo_va->base.bo_list);
2543
2544         spin_lock(&vm->invalidated_lock);
2545         list_del(&bo_va->base.vm_status);
2546         spin_unlock(&vm->invalidated_lock);
2547
2548         list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
2549                 list_del(&mapping->list);
2550                 amdgpu_vm_it_remove(mapping, &vm->va);
2551                 mapping->bo_va = NULL;
2552                 trace_amdgpu_vm_bo_unmap(bo_va, mapping);
2553                 list_add(&mapping->list, &vm->freed);
2554         }
2555         list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
2556                 list_del(&mapping->list);
2557                 amdgpu_vm_it_remove(mapping, &vm->va);
2558                 amdgpu_vm_free_mapping(adev, vm, mapping,
2559                                        bo_va->last_pt_update);
2560         }
2561
2562         dma_fence_put(bo_va->last_pt_update);
2563         kfree(bo_va);
2564 }
2565
2566 /**
2567  * amdgpu_vm_bo_invalidate - mark the bo as invalid
2568  *
2569  * @adev: amdgpu_device pointer
2570  * @bo: amdgpu buffer object
2571  * @evicted: is the BO evicted
2572  *
2573  * Mark @bo as invalid.
2574  */
2575 void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
2576                              struct amdgpu_bo *bo, bool evicted)
2577 {
2578         struct amdgpu_vm_bo_base *bo_base;
2579
2580         /* shadow bo doesn't have bo base, its validation needs its parent */
2581         if (bo->parent && bo->parent->shadow == bo)
2582                 bo = bo->parent;
2583
2584         list_for_each_entry(bo_base, &bo->va, bo_list) {
2585                 struct amdgpu_vm *vm = bo_base->vm;
2586
2587                 if (evicted && bo->tbo.resv == vm->root.base.bo->tbo.resv) {
2588                         amdgpu_vm_bo_evicted(bo_base);
2589                         continue;
2590                 }
2591
2592                 if (bo_base->moved)
2593                         continue;
2594                 bo_base->moved = true;
2595
2596                 if (bo->tbo.type == ttm_bo_type_kernel)
2597                         amdgpu_vm_bo_relocated(bo_base);
2598                 else if (bo->tbo.resv == vm->root.base.bo->tbo.resv)
2599                         amdgpu_vm_bo_moved(bo_base);
2600                 else
2601                         amdgpu_vm_bo_invalidated(bo_base);
2602         }
2603 }
2604
2605 /**
2606  * amdgpu_vm_get_block_size - calculate VM page table size as a power of two
2607  *
2608  * @vm_size: VM size
2609  *
2610  * Returns:
2611  * VM page table size as a power of two
2612  */
2613 static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size)
2614 {
2615         /* Total bits covered by PD + PTs */
2616         unsigned bits = ilog2(vm_size) + 18;
2617
2618         /* Make sure the PD is 4K in size up to 8GB of address space.
2619            Above that, split the bits equally between PD and PTs */
2620         if (vm_size <= 8)
2621                 return (bits - 9);
2622         else
2623                 return ((bits + 3) / 2);
2624 }
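     /* For illustration: an 8 GB VM gives bits == 3 + 18 == 21 and a block
      * size of 21 - 9 == 12, keeping the PD at 4K; a 256 GB VM gives
      * bits == 26 and a block size of (26 + 3) / 2 == 14, splitting the
      * remaining bits roughly evenly between PD and PTs.
      */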
2625
2626 /**
2627  * amdgpu_vm_adjust_size - adjust vm size, block size and fragment size
2628  *
2629  * @adev: amdgpu_device pointer
2630  * @min_vm_size: the minimum vm size in GB if it's set auto
2631  * @fragment_size_default: Default PTE fragment size
2632  * @max_level: max VMPT level
2633  * @max_bits: max address space size in bits
2634  *
2635  */
2636 void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
2637                            uint32_t fragment_size_default, unsigned max_level,
2638                            unsigned max_bits)
2639 {
2640         unsigned int max_size = 1 << (max_bits - 30);
2641         unsigned int vm_size;
2642         uint64_t tmp;
2643
2644         /* adjust vm size first */
2645         if (amdgpu_vm_size != -1) {
2646                 vm_size = amdgpu_vm_size;
2647                 if (vm_size > max_size) {
2648                         dev_warn(adev->dev, "VM size (%d) too large, max is %u GB\n",
2649                                  amdgpu_vm_size, max_size);
2650                         vm_size = max_size;
2651                 }
2652         } else {
2653                 struct sysinfo si;
2654                 unsigned int phys_ram_gb;
2655
2656                 /* Optimal VM size depends on the amount of physical
2657                  * RAM available. Underlying requirements and
2658                  * assumptions:
2659                  *
2660                  *  - Need to map system memory and VRAM from all GPUs
2661                  *     - VRAM from other GPUs not known here
2662                  *     - Assume VRAM <= system memory
2663                  *  - On GFX8 and older, VM space can be segmented for
2664                  *    different MTYPEs
2665                  *  - Need to allow room for fragmentation, guard pages etc.
2666                  *
2667                  * This adds up to a rough guess of system memory x3.
2668                  * Round up to power of two to maximize the available
2669                  * VM size with the given page table size.
2670                  */
2671                 si_meminfo(&si);
2672                 phys_ram_gb = ((uint64_t)si.totalram * si.mem_unit +
2673                                (1 << 30) - 1) >> 30;
2674                 vm_size = roundup_pow_of_two(
2675                         min(max(phys_ram_gb * 3, min_vm_size), max_size));
2676         }
2677
2678         adev->vm_manager.max_pfn = (uint64_t)vm_size << 18;
2679
2680         tmp = roundup_pow_of_two(adev->vm_manager.max_pfn);
2681         if (amdgpu_vm_block_size != -1)
2682                 tmp >>= amdgpu_vm_block_size - 9;
2683         tmp = DIV_ROUND_UP(fls64(tmp) - 1, 9) - 1;
2684         adev->vm_manager.num_level = min(max_level, (unsigned)tmp);
2685         switch (adev->vm_manager.num_level) {
2686         case 3:
2687                 adev->vm_manager.root_level = AMDGPU_VM_PDB2;
2688                 break;
2689         case 2:
2690                 adev->vm_manager.root_level = AMDGPU_VM_PDB1;
2691                 break;
2692         case 1:
2693                 adev->vm_manager.root_level = AMDGPU_VM_PDB0;
2694                 break;
2695         default:
2696                 dev_err(adev->dev, "VMPT only supports 2~4+1 levels\n");
2697         }
2698         /* block size depends on vm size and hw setup */
2699         if (amdgpu_vm_block_size != -1)
2700                 adev->vm_manager.block_size =
2701                         min((unsigned)amdgpu_vm_block_size, max_bits
2702                             - AMDGPU_GPU_PAGE_SHIFT
2703                             - 9 * adev->vm_manager.num_level);
2704         else if (adev->vm_manager.num_level > 1)
2705                 adev->vm_manager.block_size = 9;
2706         else
2707                 adev->vm_manager.block_size = amdgpu_vm_get_block_size(tmp);
2708
2709         if (amdgpu_vm_fragment_size == -1)
2710                 adev->vm_manager.fragment_size = fragment_size_default;
2711         else
2712                 adev->vm_manager.fragment_size = amdgpu_vm_fragment_size;
2713
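             /* Worked example with assumed parameters (not tied to a specific
              * ASIC, assuming max_level >= 3): vm_size == 262144 GB (256 TB)
              * gives max_pfn == 1ULL << 36, so tmp == DIV_ROUND_UP(36, 9) - 1
              * == 3, num_level == 3 with a PDB2 root and block_size == 9,
              * covering 12 + 9 + 3 * 9 == 48 bits of address space.
              */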
2714         DRM_INFO("vm size is %u GB, %u levels, block size is %u-bit, fragment size is %u-bit\n",
2715                  vm_size, adev->vm_manager.num_level + 1,
2716                  adev->vm_manager.block_size,
2717                  adev->vm_manager.fragment_size);
2718 }
2719
2720 /**
2721  * amdgpu_vm_init - initialize a vm instance
2722  *
2723  * @adev: amdgpu_device pointer
2724  * @vm: requested vm
2725  * @vm_context: Indicates if it is a GFX or Compute context
2726  * @pasid: Process address space identifier
2727  *
2728  * Init @vm fields.
2729  *
2730  * Returns:
2731  * 0 for success, error for failure.
2732  */
2733 int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
2734                    int vm_context, unsigned int pasid)
2735 {
2736         struct amdgpu_bo_param bp;
2737         struct amdgpu_bo *root;
2738         int r, i;
2739
2740         vm->va = RB_ROOT_CACHED;
2741         for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
2742                 vm->reserved_vmid[i] = NULL;
2743         INIT_LIST_HEAD(&vm->evicted);
2744         INIT_LIST_HEAD(&vm->relocated);
2745         INIT_LIST_HEAD(&vm->moved);
2746         INIT_LIST_HEAD(&vm->idle);
2747         INIT_LIST_HEAD(&vm->invalidated);
2748         spin_lock_init(&vm->invalidated_lock);
2749         INIT_LIST_HEAD(&vm->freed);
2750
2751         /* create scheduler entity for page table updates */
2752         r = drm_sched_entity_init(&vm->entity, adev->vm_manager.vm_pte_rqs,
2753                                   adev->vm_manager.vm_pte_num_rqs, NULL);
2754         if (r)
2755                 return r;
2756
2757         vm->pte_support_ats = false;
2758
2759         if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE) {
2760                 vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
2761                                                 AMDGPU_VM_USE_CPU_FOR_COMPUTE);
2762
2763                 if (adev->asic_type == CHIP_RAVEN)
2764                         vm->pte_support_ats = true;
2765         } else {
2766                 vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
2767                                                 AMDGPU_VM_USE_CPU_FOR_GFX);
2768         }
2769         DRM_DEBUG_DRIVER("VM update mode is %s\n",
2770                          vm->use_cpu_for_update ? "CPU" : "SDMA");
2771         WARN_ONCE((vm->use_cpu_for_update && !amdgpu_gmc_vram_full_visible(&adev->gmc)),
2772                   "CPU update of VM recommended only for large BAR system\n");
2773         vm->last_update = NULL;
2774
2775         amdgpu_vm_bo_param(adev, vm, adev->vm_manager.root_level, &bp);
2776         if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE)
2777                 bp.flags &= ~AMDGPU_GEM_CREATE_SHADOW;
2778         r = amdgpu_bo_create(adev, &bp, &root);
2779         if (r)
2780                 goto error_free_sched_entity;
2781
2782         r = amdgpu_bo_reserve(root, true);
2783         if (r)
2784                 goto error_free_root;
2785
2786         r = amdgpu_vm_clear_bo(adev, vm, root,
2787                                adev->vm_manager.root_level,
2788                                vm->pte_support_ats);
2789         if (r)
2790                 goto error_unreserve;
2791
2792         amdgpu_vm_bo_base_init(&vm->root.base, vm, root);
2793         amdgpu_bo_unreserve(vm->root.base.bo);
2794
2795         if (pasid) {
2796                 unsigned long flags;
2797
2798                 spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
2799                 r = idr_alloc(&adev->vm_manager.pasid_idr, vm, pasid, pasid + 1,
2800                               GFP_ATOMIC);
2801                 spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
2802                 if (r < 0)
2803                         goto error_free_root;
2804
2805                 vm->pasid = pasid;
2806         }
2807
2808         INIT_KFIFO(vm->faults);
2809         vm->fault_credit = 16;
2810
2811         return 0;
2812
2813 error_unreserve:
2814         amdgpu_bo_unreserve(vm->root.base.bo);
2815
2816 error_free_root:
2817         amdgpu_bo_unref(&vm->root.base.bo->shadow);
2818         amdgpu_bo_unref(&vm->root.base.bo);
2819         vm->root.base.bo = NULL;
2820
2821 error_free_sched_entity:
2822         drm_sched_entity_destroy(&vm->entity);
2823
2824         return r;
2825 }
2826
2827 /**
2828  * amdgpu_vm_make_compute - Turn a GFX VM into a compute VM
2829  *
2830  * @adev: amdgpu_device pointer
2831  * @vm: requested vm
      * @pasid: pasid to use
2832  *
2833  * This only works on GFX VMs that don't have any BOs added and no
2834  * page tables allocated yet.
2835  *
2836  * Changes the following VM parameters:
2837  * - use_cpu_for_update
2838  * - pte_supports_ats
2839  * - pasid (old PASID is released, because compute manages its own PASIDs)
2840  *
2841  * Reinitializes the page directory to reflect the changed ATS
2842  * setting.
2843  *
2844  * Returns:
2845  * 0 for success, -errno for errors.
2846  */
2847 int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm, unsigned int pasid)
2848 {
2849         bool pte_support_ats = (adev->asic_type == CHIP_RAVEN);
2850         int r;
2851
2852         r = amdgpu_bo_reserve(vm->root.base.bo, true);
2853         if (r)
2854                 return r;
2855
2856         /* Sanity checks */
2857         if (!RB_EMPTY_ROOT(&vm->va.rb_root) || vm->root.entries) {
2858                 r = -EINVAL;
2859                 goto unreserve_bo;
2860         }
2861
2862         if (pasid) {
2863                 unsigned long flags;
2864
2865                 spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
2866                 r = idr_alloc(&adev->vm_manager.pasid_idr, vm, pasid, pasid + 1,
2867                               GFP_ATOMIC);
2868                 spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
2869
2870                 if (r == -ENOSPC)
2871                         goto unreserve_bo;
2872                 r = 0;
2873         }
2874
2875         /* Check if PD needs to be reinitialized and do it before
2876          * changing any other state, in case it fails.
2877          */
2878         if (pte_support_ats != vm->pte_support_ats) {
2879                 r = amdgpu_vm_clear_bo(adev, vm, vm->root.base.bo,
2880                                adev->vm_manager.root_level,
2881                                pte_support_ats);
2882                 if (r)
2883                         goto free_idr;
2884         }
2885
2886         /* Update VM state */
2887         vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
2888                                     AMDGPU_VM_USE_CPU_FOR_COMPUTE);
2889         vm->pte_support_ats = pte_support_ats;
2890         DRM_DEBUG_DRIVER("VM update mode is %s\n",
2891                          vm->use_cpu_for_update ? "CPU" : "SDMA");
2892         WARN_ONCE((vm->use_cpu_for_update && !amdgpu_gmc_vram_full_visible(&adev->gmc)),
2893                   "CPU update of VM recommended only for large BAR system\n");
2894
2895         if (vm->pasid) {
2896                 unsigned long flags;
2897
2898                 spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
2899                 idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
2900                 spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
2901
2902                 /* Free the original amdgpu-allocated pasid; it will be
2903                  * replaced with a kfd-allocated pasid.
2904                  */
2905                 amdgpu_pasid_free(vm->pasid);
2906                 vm->pasid = 0;
2907         }
2908
2909         /* Free the shadow bo for compute VM */
2910         amdgpu_bo_unref(&vm->root.base.bo->shadow);
2911
2912         if (pasid)
2913                 vm->pasid = pasid;
2914
2915         goto unreserve_bo;
2916
2917 free_idr:
2918         if (pasid) {
2919                 unsigned long flags;
2920
2921                 spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
2922                 idr_remove(&adev->vm_manager.pasid_idr, pasid);
2923                 spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
2924         }
2925 unreserve_bo:
2926         amdgpu_bo_unreserve(vm->root.base.bo);
2927         return r;
2928 }
2929
2930 /**
2931  * amdgpu_vm_release_compute - release a compute vm
2932  * @adev: amdgpu_device pointer
2933  * @vm: a vm turned into compute vm by calling amdgpu_vm_make_compute
2934  *
2935  * This is the counterpart of amdgpu_vm_make_compute. It decouples the
2936  * compute pasid from the vm. Compute should stop using the vm after this call.
2937  */
2938 void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
2939 {
2940         if (vm->pasid) {
2941                 unsigned long flags;
2942
2943                 spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
2944                 idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
2945                 spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
2946         }
2947         vm->pasid = 0;
2948 }
2949
2950 /**
2951  * amdgpu_vm_free_levels - free PD/PT levels
2952  *
2953  * @adev: amdgpu device structure
2954  * @parent: PD/PT starting level to free
2955  * @level: level of parent structure
2956  *
2957  * Free the page directory or page table level and all sub levels.
2958  */
2959 static void amdgpu_vm_free_levels(struct amdgpu_device *adev,
2960                                   struct amdgpu_vm_pt *parent,
2961                                   unsigned level)
2962 {
2963         unsigned i, num_entries = amdgpu_vm_num_entries(adev, level);
2964
2965         if (parent->base.bo) {
2966                 list_del(&parent->base.bo_list);
2967                 list_del(&parent->base.vm_status);
2968                 amdgpu_bo_unref(&parent->base.bo->shadow);
2969                 amdgpu_bo_unref(&parent->base.bo);
2970         }
2971
2972         if (parent->entries)
2973                 for (i = 0; i < num_entries; i++)
2974                         amdgpu_vm_free_levels(adev, &parent->entries[i],
2975                                               level + 1);
2976
2977         kvfree(parent->entries);
2978 }
2979
2980 /**
2981  * amdgpu_vm_fini - tear down a vm instance
2982  *
2983  * @adev: amdgpu_device pointer
2984  * @vm: requested vm
2985  *
2986  * Tear down @vm.
2987  * Unbind the VM and remove all bos from the vm bo list
2988  */
2989 void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
2990 {
2991         struct amdgpu_bo_va_mapping *mapping, *tmp;
2992         bool prt_fini_needed = !!adev->gmc.gmc_funcs->set_prt;
2993         struct amdgpu_bo *root;
2994         u64 fault;
2995         int i, r;
2996
2997         amdgpu_amdkfd_gpuvm_destroy_cb(adev, vm);
2998
2999         /* Clear pending page faults from IH when the VM is destroyed */
3000         while (kfifo_get(&vm->faults, &fault))
3001                 amdgpu_ih_clear_fault(adev, fault);
3002
3003         if (vm->pasid) {
3004                 unsigned long flags;
3005
3006                 spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
3007                 idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
3008                 spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
3009         }
3010
3011         drm_sched_entity_destroy(&vm->entity);
3012
3013         if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
3014                 dev_err(adev->dev, "still active bo inside vm\n");
3015         }
3016         rbtree_postorder_for_each_entry_safe(mapping, tmp,
3017                                              &vm->va.rb_root, rb) {
3018                 list_del(&mapping->list);
3019                 amdgpu_vm_it_remove(mapping, &vm->va);
3020                 kfree(mapping);
3021         }
3022         list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
3023                 if (mapping->flags & AMDGPU_PTE_PRT && prt_fini_needed) {
3024                         amdgpu_vm_prt_fini(adev, vm);
3025                         prt_fini_needed = false;
3026                 }
3027
3028                 list_del(&mapping->list);
3029                 amdgpu_vm_free_mapping(adev, vm, mapping, NULL);
3030         }
3031
3032         root = amdgpu_bo_ref(vm->root.base.bo);
3033         r = amdgpu_bo_reserve(root, true);
3034         if (r) {
3035                 dev_err(adev->dev, "Leaking page tables because BO reservation failed\n");
3036         } else {
3037                 amdgpu_vm_free_levels(adev, &vm->root,
3038                                       adev->vm_manager.root_level);
3039                 amdgpu_bo_unreserve(root);
3040         }
3041         amdgpu_bo_unref(&root);
3042         dma_fence_put(vm->last_update);
3043         for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
3044                 amdgpu_vmid_free_reserved(adev, vm, i);
3045 }
3046
3047 /**
3048  * amdgpu_vm_pasid_fault_credit - Check fault credit for given PASID
3049  *
3050  * @adev: amdgpu_device pointer
3051  * @pasid: PASID to identify the VM
3052  *
3053  * This function is expected to be called in interrupt context.
3054  *
3055  * Returns:
3056  * True if there was fault credit, false otherwise
3057  */
3058 bool amdgpu_vm_pasid_fault_credit(struct amdgpu_device *adev,
3059                                   unsigned int pasid)
3060 {
3061         struct amdgpu_vm *vm;
3062
3063         spin_lock(&adev->vm_manager.pasid_lock);
3064         vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
3065         if (!vm) {
3066                 /* VM not found, can't track fault credit */
3067                 spin_unlock(&adev->vm_manager.pasid_lock);
3068                 return true;
3069         }
3070
3071         /* No lock needed. Only accessed by IRQ handler */
3072         if (!vm->fault_credit) {
3073                 /* Too many faults in this VM */
3074                 spin_unlock(&adev->vm_manager.pasid_lock);
3075                 return false;
3076         }
3077
3078         vm->fault_credit--;
3079         spin_unlock(&adev->vm_manager.pasid_lock);
3080         return true;
3081 }
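
/*
 * Illustrative sketch (compiled out): how an interrupt-handler prescreen
 * step might use the fault credit above to decide whether a page fault is
 * still worth handling.  The function name and the way the PASID reaches
 * it are hypothetical.
 */
#if 0
static bool example_prescreen_fault(struct amdgpu_device *adev,
                                    unsigned int pasid)
{
        /* Faults from a VM that ran out of credit are dropped early. */
        if (!amdgpu_vm_pasid_fault_credit(adev, pasid))
                return false;

        /* Otherwise let the normal fault processing path handle it. */
        return true;
}
#endif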
3082
3083 /**
3084  * amdgpu_vm_manager_init - init the VM manager
3085  *
3086  * @adev: amdgpu_device pointer
3087  *
3088  * Initialize the VM manager structures
3089  */
3090 void amdgpu_vm_manager_init(struct amdgpu_device *adev)
3091 {
3092         unsigned i;
3093
3094         amdgpu_vmid_mgr_init(adev);
3095
3096         adev->vm_manager.fence_context =
3097                 dma_fence_context_alloc(AMDGPU_MAX_RINGS);
3098         for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
3099                 adev->vm_manager.seqno[i] = 0;
3100
3101         spin_lock_init(&adev->vm_manager.prt_lock);
3102         atomic_set(&adev->vm_manager.num_prt_users, 0);
3103
3104         /* Unless overridden by the user, compute VM page tables are updated
3105          * by the CPU only on large-BAR systems.
3106          */
3107 #ifdef CONFIG_X86_64
3108         if (amdgpu_vm_update_mode == -1) {
3109                 if (amdgpu_gmc_vram_full_visible(&adev->gmc))
3110                         adev->vm_manager.vm_update_mode =
3111                                 AMDGPU_VM_USE_CPU_FOR_COMPUTE;
3112                 else
3113                         adev->vm_manager.vm_update_mode = 0;
3114         } else
3115                 adev->vm_manager.vm_update_mode = amdgpu_vm_update_mode;
3116 #else
3117         adev->vm_manager.vm_update_mode = 0;
3118 #endif
3119
3120         idr_init(&adev->vm_manager.pasid_idr);
3121         spin_lock_init(&adev->vm_manager.pasid_lock);
3122 }
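
/*
 * Illustrative sketch (compiled out): how per-VM code could consult the
 * update mode chosen above.  The context constants mirror the ones used
 * by amdgpu_vm_init earlier in this file; the helper itself is
 * hypothetical.
 */
#if 0
static bool example_use_cpu_for_update(struct amdgpu_device *adev,
                                       int vm_context)
{
        unsigned int mode = adev->vm_manager.vm_update_mode;

        if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE)
                return !!(mode & AMDGPU_VM_USE_CPU_FOR_COMPUTE);

        return !!(mode & AMDGPU_VM_USE_CPU_FOR_GRAPHICS);
}
#endif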
3123
3124 /**
3125  * amdgpu_vm_manager_fini - cleanup VM manager
3126  *
3127  * @adev: amdgpu_device pointer
3128  *
3129  * Cleanup the VM manager and free resources.
3130  */
3131 void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
3132 {
3133         WARN_ON(!idr_is_empty(&adev->vm_manager.pasid_idr));
3134         idr_destroy(&adev->vm_manager.pasid_idr);
3135
3136         amdgpu_vmid_mgr_fini(adev);
3137 }
3138
3139 /**
3140  * amdgpu_vm_ioctl - Manages VMID reservation for VM hubs.
3141  *
3142  * @dev: drm device pointer
3143  * @data: drm_amdgpu_vm
3144  * @filp: drm file pointer
3145  *
3146  * Returns:
3147  * 0 for success, -errno for errors.
3148  */
3149 int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
3150 {
3151         union drm_amdgpu_vm *args = data;
3152         struct amdgpu_device *adev = dev->dev_private;
3153         struct amdgpu_fpriv *fpriv = filp->driver_priv;
3154         int r;
3155
3156         switch (args->in.op) {
3157         case AMDGPU_VM_OP_RESERVE_VMID:
3158                 /* currently we only need to reserve VMIDs from the gfxhub */
3159                 r = amdgpu_vmid_alloc_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB);
3160                 if (r)
3161                         return r;
3162                 break;
3163         case AMDGPU_VM_OP_UNRESERVE_VMID:
3164                 amdgpu_vmid_free_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB);
3165                 break;
3166         default:
3167                 return -EINVAL;
3168         }
3169
3170         return 0;
3171 }
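
/*
 * Illustrative userspace sketch (compiled out): reserving a gfxhub VMID
 * through the ioctl above using libdrm's drmCommandWriteRead().  Error
 * handling and the reasons for wanting a reserved VMID are omitted; the
 * function name is hypothetical.
 */
#if 0
#include <xf86drm.h>
#include <amdgpu_drm.h>

static int example_reserve_vmid(int drm_fd)
{
        union drm_amdgpu_vm args = {
                .in.op = AMDGPU_VM_OP_RESERVE_VMID,
        };

        return drmCommandWriteRead(drm_fd, DRM_AMDGPU_VM,
                                   &args, sizeof(args));
}
#endif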
3172
3173 /**
3174  * amdgpu_vm_get_task_info - Extracts task info for a PASID.
3175  *
3176  * @adev: amdgpu_device pointer
3177  * @pasid: PASID identifier for VM
3178  * @task_info: task_info to fill.
3179  */
3180 void amdgpu_vm_get_task_info(struct amdgpu_device *adev, unsigned int pasid,
3181                          struct amdgpu_task_info *task_info)
3182 {
3183         struct amdgpu_vm *vm;
3184
3185         spin_lock(&adev->vm_manager.pasid_lock);
3186
3187         vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
3188         if (vm)
3189                 *task_info = vm->task_info;
3190
3191         spin_unlock(&adev->vm_manager.pasid_lock);
3192 }
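
/*
 * Illustrative sketch (compiled out): how a GPU page-fault handler might
 * use the helper above to name the offending process in its log message.
 * The PASID source and the exact wording are hypothetical.
 */
#if 0
static void example_report_fault(struct amdgpu_device *adev,
                                 unsigned int pasid, u64 addr)
{
        struct amdgpu_task_info task_info = {};

        amdgpu_vm_get_task_info(adev, pasid, &task_info);
        dev_err(adev->dev,
                "VM fault at 0x%llx in process %s (pid %d, thread %s pid %d)\n",
                addr, task_info.process_name, task_info.tgid,
                task_info.task_name, task_info.pid);
}
#endif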
3193
3194 /**
3195  * amdgpu_vm_set_task_info - Sets the VM's task info from the current task.
3196  *
3197  * @vm: vm for which to set the info
3198  */
3199 void amdgpu_vm_set_task_info(struct amdgpu_vm *vm)
3200 {
3201         if (!vm->task_info.pid) {
3202                 vm->task_info.pid = current->pid;
3203                 get_task_comm(vm->task_info.task_name, current);
3204
3205                 if (current->group_leader->mm == current->mm) {
3206                         vm->task_info.tgid = current->group_leader->pid;
3207                         get_task_comm(vm->task_info.process_name, current->group_leader);
3208                 }
3209         }
3210 }
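
/*
 * Illustrative sketch (compiled out): recording the submitting task on a
 * VM at command-submission time so later fault reports can name it.  The
 * surrounding submit path is heavily simplified and hypothetical.
 */
#if 0
static void example_record_submitter(struct amdgpu_vm *vm)
{
        /* The first submission from this VM captures the caller's pid/comm. */
        amdgpu_vm_set_task_info(vm);
}
#endif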