[linux.git] drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
drm/amdgpu: use amdgpu_bo_param for amdgpu_bo_create v2
1 /*
2  * Copyright 2008 Advanced Micro Devices, Inc.
3  * Copyright 2008 Red Hat Inc.
4  * Copyright 2009 Jerome Glisse.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors: Dave Airlie
25  *          Alex Deucher
26  *          Jerome Glisse
27  */
28 #include <linux/dma-fence-array.h>
29 #include <linux/interval_tree_generic.h>
30 #include <linux/idr.h>
31 #include <drm/drmP.h>
32 #include <drm/amdgpu_drm.h>
33 #include "amdgpu.h"
34 #include "amdgpu_trace.h"
35 #include "amdgpu_amdkfd.h"
36
37 /*
38  * GPUVM
39  * GPUVM is similar to the legacy gart on older asics, however
40  * rather than there being a single global gart table
41  * for the entire GPU, there are multiple VM page tables active
42  * at any given time.  The VM page tables can contain a mix of
43  * vram pages and system memory pages, and the system memory pages
44  * can be mapped as snooped (cached system pages) or unsnooped
45  * (uncached system pages).
46  * Each VM has an ID associated with it and there is a page table
47  * associated with each VMID.  When executing a command buffer,
48  * the kernel tells the ring what VMID to use for that command
49  * buffer.  VMIDs are allocated dynamically as commands are submitted.
50  * The userspace drivers maintain their own address space and the kernel
51  * sets up their page tables accordingly when they submit their
52  * command buffers and a VMID is assigned.
53  * Cayman/Trinity support up to 8 active VMs at any given time;
54  * SI supports 16.
55  */
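/*
 * Naming note for the code below: the page table hierarchy is walked from the
 * root page directory blocks down to the page table block, i.e.
 * AMDGPU_VM_PDB2 -> AMDGPU_VM_PDB1 -> AMDGPU_VM_PDB0 -> AMDGPU_VM_PTB.
 * PDB entries point to the next lower level, PTB entries are the actual PTEs.
 * Not every ASIC uses all levels; adev->vm_manager.root_level selects where
 * the hierarchy starts.
 */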
56
57 #define START(node) ((node)->start)
58 #define LAST(node) ((node)->last)
59
60 INTERVAL_TREE_DEFINE(struct amdgpu_bo_va_mapping, rb, uint64_t, __subtree_last,
61                      START, LAST, static, amdgpu_vm_it)
62
63 #undef START
64 #undef LAST
65
66 /* Local structure. Encapsulate some VM table update parameters to reduce
67  * the number of function parameters
68  */
69 struct amdgpu_pte_update_params {
70         /* amdgpu device we do this update for */
71         struct amdgpu_device *adev;
72         /* optional amdgpu_vm we do this update for */
73         struct amdgpu_vm *vm;
74         /* address where to copy page table entries from */
75         uint64_t src;
76         /* indirect buffer to fill with commands */
77         struct amdgpu_ib *ib;
78         /* Function which actually does the update */
79         void (*func)(struct amdgpu_pte_update_params *params,
80                      struct amdgpu_bo *bo, uint64_t pe,
81                      uint64_t addr, unsigned count, uint32_t incr,
82                      uint64_t flags);
83         /* The next two are used during VM update by CPU
84          *  DMA addresses to use for mapping
85          *  Kernel pointer of PD/PT BO that needs to be updated
86          */
87         dma_addr_t *pages_addr;
88         void *kptr;
89 };
90
91 /* Helper to disable partial resident texture feature from a fence callback */
92 struct amdgpu_prt_cb {
93         struct amdgpu_device *adev;
94         struct dma_fence_cb cb;
95 };
96
97 /**
98  * amdgpu_vm_level_shift - return the addr shift for each level
99  *
100  * @adev: amdgpu_device pointer
101  *
102  * Returns the number of bits the pfn needs to be right shifted for a level.
103  */
104 static unsigned amdgpu_vm_level_shift(struct amdgpu_device *adev,
105                                       unsigned level)
106 {
107         unsigned shift = 0xff;
108
109         switch (level) {
110         case AMDGPU_VM_PDB2:
111         case AMDGPU_VM_PDB1:
112         case AMDGPU_VM_PDB0:
113                 shift = 9 * (AMDGPU_VM_PDB0 - level) +
114                         adev->vm_manager.block_size;
115                 break;
116         case AMDGPU_VM_PTB:
117                 shift = 0;
118                 break;
119         default:
120                 dev_err(adev->dev, "the level%d isn't supported.\n", level);
121         }
122
123         return shift;
124 }
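/*
 * Illustrative values (assuming the common 9-bit block_size): PTB shifts by
 * 0 bits, PDB0 by 9, PDB1 by 18 and PDB2 by 27, i.e. each directory level
 * indexes 512 entries of the level below it.
 */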
125
126 /**
127  * amdgpu_vm_num_entries - return the number of entries in a PD/PT
128  *
129  * @adev: amdgpu_device pointer
130  *
131  * Calculate the number of entries in a page directory or page table.
132  */
133 static unsigned amdgpu_vm_num_entries(struct amdgpu_device *adev,
134                                       unsigned level)
135 {
136         unsigned shift = amdgpu_vm_level_shift(adev,
137                                                adev->vm_manager.root_level);
138
139         if (level == adev->vm_manager.root_level)
140                 /* For the root directory */
141                 return round_up(adev->vm_manager.max_pfn, 1 << shift) >> shift;
142         else if (level != AMDGPU_VM_PTB)
143                 /* Everything in between */
144                 return 512;
145         else
146                 /* For the page tables on the leaves */
147                 return AMDGPU_VM_PTE_COUNT(adev);
148 }
149
150 /**
151  * amdgpu_vm_bo_size - returns the size of the BOs in bytes
152  *
153  * @adev: amdgpu_device pointer
154  *
155  * Calculate the size of the BO for a page directory or page table in bytes.
156  */
157 static unsigned amdgpu_vm_bo_size(struct amdgpu_device *adev, unsigned level)
158 {
159         return AMDGPU_GPU_PAGE_ALIGN(amdgpu_vm_num_entries(adev, level) * 8);
160 }
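/*
 * Illustrative example: with 512 entries per level (9-bit block_size) this is
 * 512 * 8 = 4096 bytes, i.e. one GPU page per PD/PT BO; only the root PD size
 * depends on max_pfn instead.
 */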
161
162 /**
163  * amdgpu_vm_get_pd_bo - add the VM PD to a validation list
164  *
165  * @vm: vm providing the BOs
166  * @validated: head of validation list
167  * @entry: entry to add
168  *
169  * Add the page directory to the list of BOs to
170  * validate for command submission.
171  */
172 void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
173                          struct list_head *validated,
174                          struct amdgpu_bo_list_entry *entry)
175 {
176         entry->robj = vm->root.base.bo;
177         entry->priority = 0;
178         entry->tv.bo = &entry->robj->tbo;
179         entry->tv.shared = true;
180         entry->user_pages = NULL;
181         list_add(&entry->tv.head, validated);
182 }
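/*
 * Hypothetical usage sketch (not copied from an actual caller): command
 * submission typically builds its validation list roughly like
 *
 *	struct amdgpu_bo_list_entry vm_pd;
 *	LIST_HEAD(validated);
 *
 *	amdgpu_vm_get_pd_bo(vm, &validated, &vm_pd);
 *	(reserve/validate the list, then call amdgpu_vm_validate_pt_bos())
 *
 * so that the root PD is reserved and validated together with the user BOs.
 */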
183
184 /**
185  * amdgpu_vm_validate_pt_bos - validate the page table BOs
186  *
187  * @adev: amdgpu device pointer
188  * @vm: vm providing the BOs
189  * @validate: callback to do the validation
190  * @param: parameter for the validation callback
191  *
192  * Validate the page table BOs on command submission if necessary.
193  */
194 int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
195                               int (*validate)(void *p, struct amdgpu_bo *bo),
196                               void *param)
197 {
198         struct ttm_bo_global *glob = adev->mman.bdev.glob;
199         int r;
200
201         spin_lock(&vm->status_lock);
202         while (!list_empty(&vm->evicted)) {
203                 struct amdgpu_vm_bo_base *bo_base;
204                 struct amdgpu_bo *bo;
205
206                 bo_base = list_first_entry(&vm->evicted,
207                                            struct amdgpu_vm_bo_base,
208                                            vm_status);
209                 spin_unlock(&vm->status_lock);
210
211                 bo = bo_base->bo;
212                 BUG_ON(!bo);
213                 if (bo->parent) {
214                         r = validate(param, bo);
215                         if (r)
216                                 return r;
217
218                         spin_lock(&glob->lru_lock);
219                         ttm_bo_move_to_lru_tail(&bo->tbo);
220                         if (bo->shadow)
221                                 ttm_bo_move_to_lru_tail(&bo->shadow->tbo);
222                         spin_unlock(&glob->lru_lock);
223                 }
224
225                 if (bo->tbo.type == ttm_bo_type_kernel &&
226                     vm->use_cpu_for_update) {
227                         r = amdgpu_bo_kmap(bo, NULL);
228                         if (r)
229                                 return r;
230                 }
231
232                 spin_lock(&vm->status_lock);
233                 if (bo->tbo.type != ttm_bo_type_kernel)
234                         list_move(&bo_base->vm_status, &vm->moved);
235                 else
236                         list_move(&bo_base->vm_status, &vm->relocated);
237         }
238         spin_unlock(&vm->status_lock);
239
240         return 0;
241 }
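/*
 * Note on the per-VM state lists used above (as far as can be inferred from
 * this function): evicted BOs are tracked on vm->evicted; once validated
 * again, page table BOs (ttm_bo_type_kernel) are moved to vm->relocated so
 * their parent directory entries get rewritten, while other per-VM BOs are
 * moved to vm->moved so their mappings can be updated.
 */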
242
243 /**
244  * amdgpu_vm_ready - check VM is ready for updates
245  *
246  * @vm: VM to check
247  *
248  * Check if all VM PDs/PTs are ready for updates
249  */
250 bool amdgpu_vm_ready(struct amdgpu_vm *vm)
251 {
252         bool ready;
253
254         spin_lock(&vm->status_lock);
255         ready = list_empty(&vm->evicted);
256         spin_unlock(&vm->status_lock);
257
258         return ready;
259 }
260
261 /**
262  * amdgpu_vm_clear_bo - initially clear the PDs/PTs
263  *
264  * @adev: amdgpu_device pointer
265  * @bo: BO to clear
266  * @level: level this BO is at
267  *
268  * Root PD needs to be reserved when calling this.
269  */
270 static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
271                               struct amdgpu_vm *vm, struct amdgpu_bo *bo,
272                               unsigned level, bool pte_support_ats)
273 {
274         struct ttm_operation_ctx ctx = { true, false };
275         struct dma_fence *fence = NULL;
276         unsigned entries, ats_entries;
277         struct amdgpu_ring *ring;
278         struct amdgpu_job *job;
279         uint64_t addr;
280         int r;
281
282         addr = amdgpu_bo_gpu_offset(bo);
283         entries = amdgpu_bo_size(bo) / 8;
284
285         if (pte_support_ats) {
286                 if (level == adev->vm_manager.root_level) {
287                         ats_entries = amdgpu_vm_level_shift(adev, level);
288                         ats_entries += AMDGPU_GPU_PAGE_SHIFT;
289                         ats_entries = AMDGPU_VA_HOLE_START >> ats_entries;
290                         ats_entries = min(ats_entries, entries);
291                         entries -= ats_entries;
292                 } else {
293                         ats_entries = entries;
294                         entries = 0;
295                 }
296         } else {
297                 ats_entries = 0;
298         }
299
300         ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
301
302         r = reservation_object_reserve_shared(bo->tbo.resv);
303         if (r)
304                 return r;
305
306         r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
307         if (r)
308                 goto error;
309
310         r = amdgpu_job_alloc_with_ib(adev, 64, &job);
311         if (r)
312                 goto error;
313
314         if (ats_entries) {
315                 uint64_t ats_value;
316
317                 ats_value = AMDGPU_PTE_DEFAULT_ATC;
318                 if (level != AMDGPU_VM_PTB)
319                         ats_value |= AMDGPU_PDE_PTE;
320
321                 amdgpu_vm_set_pte_pde(adev, &job->ibs[0], addr, 0,
322                                       ats_entries, 0, ats_value);
323                 addr += ats_entries * 8;
324         }
325
326         if (entries)
327                 amdgpu_vm_set_pte_pde(adev, &job->ibs[0], addr, 0,
328                                       entries, 0, 0);
329
330         amdgpu_ring_pad_ib(ring, &job->ibs[0]);
331
332         WARN_ON(job->ibs[0].length_dw > 64);
333         r = amdgpu_sync_resv(adev, &job->sync, bo->tbo.resv,
334                              AMDGPU_FENCE_OWNER_UNDEFINED, false);
335         if (r)
336                 goto error_free;
337
338         r = amdgpu_job_submit(job, ring, &vm->entity,
339                               AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
340         if (r)
341                 goto error_free;
342
343         amdgpu_bo_fence(bo, fence, true);
344         dma_fence_put(fence);
345
346         if (bo->shadow)
347                 return amdgpu_vm_clear_bo(adev, vm, bo->shadow,
348                                           level, pte_support_ats);
349
350         return 0;
351
352 error_free:
353         amdgpu_job_free(job);
354
355 error:
356         return r;
357 }
358
359 /**
360  * amdgpu_vm_alloc_levels - allocate the PD/PT levels
361  *
362  * @adev: amdgpu_device pointer
363  * @vm: requested vm
364  * @saddr: start of the address range
365  * @eaddr: end of the address range
366  *
367  * Make sure the page directories and page tables are allocated
368  */
369 static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
370                                   struct amdgpu_vm *vm,
371                                   struct amdgpu_vm_pt *parent,
372                                   uint64_t saddr, uint64_t eaddr,
373                                   unsigned level, bool ats)
374 {
375         unsigned shift = amdgpu_vm_level_shift(adev, level);
376         unsigned pt_idx, from, to;
377         u64 flags;
378         int r;
379
380         if (!parent->entries) {
381                 unsigned num_entries = amdgpu_vm_num_entries(adev, level);
382
383                 parent->entries = kvmalloc_array(num_entries,
384                                                    sizeof(struct amdgpu_vm_pt),
385                                                    GFP_KERNEL | __GFP_ZERO);
386                 if (!parent->entries)
387                         return -ENOMEM;
388                 memset(parent->entries, 0, sizeof(struct amdgpu_vm_pt));
389         }
390
391         from = saddr >> shift;
392         to = eaddr >> shift;
393         if (from >= amdgpu_vm_num_entries(adev, level) ||
394             to >= amdgpu_vm_num_entries(adev, level))
395                 return -EINVAL;
396
397         ++level;
398         saddr = saddr & ((1 << shift) - 1);
399         eaddr = eaddr & ((1 << shift) - 1);
400
401         flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
402         if (vm->use_cpu_for_update)
403                 flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
404         else
405                 flags |= (AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
406                                 AMDGPU_GEM_CREATE_SHADOW);
407
408         /* walk over the address space and allocate the page tables */
409         for (pt_idx = from; pt_idx <= to; ++pt_idx) {
410                 struct reservation_object *resv = vm->root.base.bo->tbo.resv;
411                 struct amdgpu_vm_pt *entry = &parent->entries[pt_idx];
412                 struct amdgpu_bo *pt;
413
414                 if (!entry->base.bo) {
415                         struct amdgpu_bo_param bp;
416
417                         memset(&bp, 0, sizeof(bp));
418                         bp.size = amdgpu_vm_bo_size(adev, level);
419                         bp.byte_align = AMDGPU_GPU_PAGE_SIZE;
420                         bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
421                         bp.flags = flags;
422                         bp.type = ttm_bo_type_kernel;
423                         bp.resv = resv;
424                         r = amdgpu_bo_create(adev, &bp, &pt);
425                         if (r)
426                                 return r;
427
428                         r = amdgpu_vm_clear_bo(adev, vm, pt, level, ats);
429                         if (r) {
430                                 amdgpu_bo_unref(&pt->shadow);
431                                 amdgpu_bo_unref(&pt);
432                                 return r;
433                         }
434
435                         if (vm->use_cpu_for_update) {
436                                 r = amdgpu_bo_kmap(pt, NULL);
437                                 if (r) {
438                                         amdgpu_bo_unref(&pt->shadow);
439                                         amdgpu_bo_unref(&pt);
440                                         return r;
441                                 }
442                         }
443
444                         /* Keep a reference to the root directory to avoid
445                          * freeing them up in the wrong order.
446                          */
447                         pt->parent = amdgpu_bo_ref(parent->base.bo);
448
449                         entry->base.vm = vm;
450                         entry->base.bo = pt;
451                         list_add_tail(&entry->base.bo_list, &pt->va);
452                         spin_lock(&vm->status_lock);
453                         list_add(&entry->base.vm_status, &vm->relocated);
454                         spin_unlock(&vm->status_lock);
455                 }
456
457                 if (level < AMDGPU_VM_PTB) {
458                         uint64_t sub_saddr = (pt_idx == from) ? saddr : 0;
459                         uint64_t sub_eaddr = (pt_idx == to) ? eaddr :
460                                 ((1 << shift) - 1);
461                         r = amdgpu_vm_alloc_levels(adev, vm, entry, sub_saddr,
462                                                    sub_eaddr, level, ats);
463                         if (r)
464                                 return r;
465                 }
466         }
467
468         return 0;
469 }
470
471 /**
472  * amdgpu_vm_alloc_pts - Allocate page tables.
473  *
474  * @adev: amdgpu_device pointer
475  * @vm: VM to allocate page tables for
476  * @saddr: Start address which needs to be allocated
477  * @size: Size from start address we need.
478  *
479  * Make sure the page tables are allocated.
480  */
481 int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
482                         struct amdgpu_vm *vm,
483                         uint64_t saddr, uint64_t size)
484 {
485         uint64_t eaddr;
486         bool ats = false;
487
488         /* validate the parameters */
489         if (saddr & AMDGPU_GPU_PAGE_MASK || size & AMDGPU_GPU_PAGE_MASK)
490                 return -EINVAL;
491
492         eaddr = saddr + size - 1;
493
494         if (vm->pte_support_ats)
495                 ats = saddr < AMDGPU_VA_HOLE_START;
496
497         saddr /= AMDGPU_GPU_PAGE_SIZE;
498         eaddr /= AMDGPU_GPU_PAGE_SIZE;
499
500         if (eaddr >= adev->vm_manager.max_pfn) {
501                 dev_err(adev->dev, "va above limit (0x%08llX >= 0x%08llX)\n",
502                         eaddr, adev->vm_manager.max_pfn);
503                 return -EINVAL;
504         }
505
506         return amdgpu_vm_alloc_levels(adev, vm, &vm->root, saddr, eaddr,
507                                       adev->vm_manager.root_level, ats);
508 }
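/*
 * Hypothetical usage sketch: a caller mapping a BO into a GPUVM range would
 * normally allocate the covering page tables first, e.g.
 *
 *	r = amdgpu_vm_alloc_pts(adev, vm, saddr, size);
 *	if (r)
 *		return r;
 *	r = amdgpu_vm_bo_map(adev, bo_va, saddr, offset, size, flags);
 *
 * with saddr and size aligned to AMDGPU_GPU_PAGE_SIZE as checked above.
 */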
509
510 /**
511  * amdgpu_vm_check_compute_bug - check whether asic has compute vm bug
512  *
513  * @adev: amdgpu_device pointer
514  */
515 void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev)
516 {
517         const struct amdgpu_ip_block *ip_block;
518         bool has_compute_vm_bug;
519         struct amdgpu_ring *ring;
520         int i;
521
522         has_compute_vm_bug = false;
523
524         ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
525         if (ip_block) {
526                 /* Compute has a VM bug for GFX version < 7.
527                  * Compute has a VM bug for GFX 8 MEC firmware version < 673. */
528                 if (ip_block->version->major <= 7)
529                         has_compute_vm_bug = true;
530                 else if (ip_block->version->major == 8)
531                         if (adev->gfx.mec_fw_version < 673)
532                                 has_compute_vm_bug = true;
533         }
534
535         for (i = 0; i < adev->num_rings; i++) {
536                 ring = adev->rings[i];
537                 if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
538                         /* only compute rings */
539                         ring->has_compute_vm_bug = has_compute_vm_bug;
540                 else
541                         ring->has_compute_vm_bug = false;
542         }
543 }
544
545 bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
546                                   struct amdgpu_job *job)
547 {
548         struct amdgpu_device *adev = ring->adev;
549         unsigned vmhub = ring->funcs->vmhub;
550         struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
551         struct amdgpu_vmid *id;
552         bool gds_switch_needed;
553         bool vm_flush_needed = job->vm_needs_flush || ring->has_compute_vm_bug;
554
555         if (job->vmid == 0)
556                 return false;
557         id = &id_mgr->ids[job->vmid];
558         gds_switch_needed = ring->funcs->emit_gds_switch && (
559                 id->gds_base != job->gds_base ||
560                 id->gds_size != job->gds_size ||
561                 id->gws_base != job->gws_base ||
562                 id->gws_size != job->gws_size ||
563                 id->oa_base != job->oa_base ||
564                 id->oa_size != job->oa_size);
565
566         if (amdgpu_vmid_had_gpu_reset(adev, id))
567                 return true;
568
569         return vm_flush_needed || gds_switch_needed;
570 }
571
572 static bool amdgpu_vm_is_large_bar(struct amdgpu_device *adev)
573 {
574         return (adev->gmc.real_vram_size == adev->gmc.visible_vram_size);
575 }
576
577 /**
578  * amdgpu_vm_flush - hardware flush the vm
579  *
580  * @ring: ring to use for flush
581  * @job: related job
582  * @need_pipe_sync: is pipe sync needed
583  *
584  * Emit a VM flush when it is necessary.
585  */
586 int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync)
587 {
588         struct amdgpu_device *adev = ring->adev;
589         unsigned vmhub = ring->funcs->vmhub;
590         struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
591         struct amdgpu_vmid *id = &id_mgr->ids[job->vmid];
592         bool gds_switch_needed = ring->funcs->emit_gds_switch && (
593                 id->gds_base != job->gds_base ||
594                 id->gds_size != job->gds_size ||
595                 id->gws_base != job->gws_base ||
596                 id->gws_size != job->gws_size ||
597                 id->oa_base != job->oa_base ||
598                 id->oa_size != job->oa_size);
599         bool vm_flush_needed = job->vm_needs_flush;
600         bool pasid_mapping_needed = id->pasid != job->pasid ||
601                 !id->pasid_mapping ||
602                 !dma_fence_is_signaled(id->pasid_mapping);
603         struct dma_fence *fence = NULL;
604         unsigned patch_offset = 0;
605         int r;
606
607         if (amdgpu_vmid_had_gpu_reset(adev, id)) {
608                 gds_switch_needed = true;
609                 vm_flush_needed = true;
610                 pasid_mapping_needed = true;
611         }
612
613         gds_switch_needed &= !!ring->funcs->emit_gds_switch;
614         vm_flush_needed &= !!ring->funcs->emit_vm_flush;
615         pasid_mapping_needed &= adev->gmc.gmc_funcs->emit_pasid_mapping &&
616                 ring->funcs->emit_wreg;
617
618         if (!vm_flush_needed && !gds_switch_needed && !need_pipe_sync)
619                 return 0;
620
621         if (ring->funcs->init_cond_exec)
622                 patch_offset = amdgpu_ring_init_cond_exec(ring);
623
624         if (need_pipe_sync)
625                 amdgpu_ring_emit_pipeline_sync(ring);
626
627         if (vm_flush_needed) {
628                 trace_amdgpu_vm_flush(ring, job->vmid, job->vm_pd_addr);
629                 amdgpu_ring_emit_vm_flush(ring, job->vmid, job->vm_pd_addr);
630         }
631
632         if (pasid_mapping_needed)
633                 amdgpu_gmc_emit_pasid_mapping(ring, job->vmid, job->pasid);
634
635         if (vm_flush_needed || pasid_mapping_needed) {
636                 r = amdgpu_fence_emit(ring, &fence);
637                 if (r)
638                         return r;
639         }
640
641         if (vm_flush_needed) {
642                 mutex_lock(&id_mgr->lock);
643                 dma_fence_put(id->last_flush);
644                 id->last_flush = dma_fence_get(fence);
645                 id->current_gpu_reset_count =
646                         atomic_read(&adev->gpu_reset_counter);
647                 mutex_unlock(&id_mgr->lock);
648         }
649
650         if (pasid_mapping_needed) {
651                 id->pasid = job->pasid;
652                 dma_fence_put(id->pasid_mapping);
653                 id->pasid_mapping = dma_fence_get(fence);
654         }
655         dma_fence_put(fence);
656
657         if (ring->funcs->emit_gds_switch && gds_switch_needed) {
658                 id->gds_base = job->gds_base;
659                 id->gds_size = job->gds_size;
660                 id->gws_base = job->gws_base;
661                 id->gws_size = job->gws_size;
662                 id->oa_base = job->oa_base;
663                 id->oa_size = job->oa_size;
664                 amdgpu_ring_emit_gds_switch(ring, job->vmid, job->gds_base,
665                                             job->gds_size, job->gws_base,
666                                             job->gws_size, job->oa_base,
667                                             job->oa_size);
668         }
669
670         if (ring->funcs->patch_cond_exec)
671                 amdgpu_ring_patch_cond_exec(ring, patch_offset);
672
673         /* the double SWITCH_BUFFER here *cannot* be skipped by COND_EXEC */
674         if (ring->funcs->emit_switch_buffer) {
675                 amdgpu_ring_emit_switch_buffer(ring);
676                 amdgpu_ring_emit_switch_buffer(ring);
677         }
678         return 0;
679 }
680
681 /**
682  * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
683  *
684  * @vm: requested vm
685  * @bo: requested buffer object
686  *
687  * Find @bo inside the requested vm.
688  * Search inside the @bos vm list for the requested vm
689  * Returns the found bo_va or NULL if none is found
690  *
691  * Object has to be reserved!
692  */
693 struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
694                                        struct amdgpu_bo *bo)
695 {
696         struct amdgpu_bo_va *bo_va;
697
698         list_for_each_entry(bo_va, &bo->va, base.bo_list) {
699                 if (bo_va->base.vm == vm) {
700                         return bo_va;
701                 }
702         }
703         return NULL;
704 }
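/*
 * Illustrative usage (sketch only, assuming a caller that already knows the
 * bo and vm): the BO has to stay reserved while its per-VM list is walked,
 * e.g.
 *
 *	if (amdgpu_bo_reserve(bo, true) == 0) {
 *		bo_va = amdgpu_vm_bo_find(vm, bo);
 *		...
 *		amdgpu_bo_unreserve(bo);
 *	}
 */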
705
706 /**
707  * amdgpu_vm_do_set_ptes - helper to call the right asic function
708  *
709  * @params: see amdgpu_pte_update_params definition
710  * @bo: PD/PT to update
711  * @pe: addr of the page entry
712  * @addr: dst addr to write into pe
713  * @count: number of page entries to update
714  * @incr: increase next addr by incr bytes
715  * @flags: hw access flags
716  *
717  * Traces the parameters and calls the right asic functions
718  * to setup the page table using the DMA.
719  */
720 static void amdgpu_vm_do_set_ptes(struct amdgpu_pte_update_params *params,
721                                   struct amdgpu_bo *bo,
722                                   uint64_t pe, uint64_t addr,
723                                   unsigned count, uint32_t incr,
724                                   uint64_t flags)
725 {
726         pe += amdgpu_bo_gpu_offset(bo);
727         trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags);
728
729         if (count < 3) {
730                 amdgpu_vm_write_pte(params->adev, params->ib, pe,
731                                     addr | flags, count, incr);
732
733         } else {
734                 amdgpu_vm_set_pte_pde(params->adev, params->ib, pe, addr,
735                                       count, incr, flags);
736         }
737 }
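/*
 * Note: for very small updates (count < 3) writing the PTE values directly
 * into the IB with amdgpu_vm_write_pte() is presumably cheaper than emitting
 * a full generate-PTE command, which is what the special case above does.
 */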
738
739 /**
740  * amdgpu_vm_do_copy_ptes - copy the PTEs from the GART
741  *
742  * @params: see amdgpu_pte_update_params definition
743  * @bo: PD/PT to update
744  * @pe: addr of the page entry
745  * @addr: dst addr to write into pe
746  * @count: number of page entries to update
747  * @incr: increase next addr by incr bytes
748  * @flags: hw access flags
749  *
750  * Traces the parameters and calls the DMA function to copy the PTEs.
751  */
752 static void amdgpu_vm_do_copy_ptes(struct amdgpu_pte_update_params *params,
753                                    struct amdgpu_bo *bo,
754                                    uint64_t pe, uint64_t addr,
755                                    unsigned count, uint32_t incr,
756                                    uint64_t flags)
757 {
758         uint64_t src = (params->src + (addr >> 12) * 8);
759
760         pe += amdgpu_bo_gpu_offset(bo);
761         trace_amdgpu_vm_copy_ptes(pe, src, count);
762
763         amdgpu_vm_copy_pte(params->adev, params->ib, pe, src, count);
764 }
765
766 /**
767  * amdgpu_vm_map_gart - Resolve gart mapping of addr
768  *
769  * @pages_addr: optional DMA address to use for lookup
770  * @addr: the unmapped addr
771  *
772  * Look up the physical address of the page that the pte resolves
773  * to and return the value to program into the page table entry.
774  */
775 static uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
776 {
777         uint64_t result;
778
779         /* page table offset */
780         result = pages_addr[addr >> PAGE_SHIFT];
781
782         /* in case cpu page size != gpu page size */
783         result |= addr & (~PAGE_MASK);
784
785         result &= 0xFFFFFFFFFFFFF000ULL;
786
787         return result;
788 }
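/*
 * Example for the CPU vs. GPU page size handling above: with 64KiB CPU pages
 * and 4KiB GPU pages, pages_addr[] holds one DMA address per 64KiB page and
 * the surviving low bits of @addr (bits 12-15 after the final mask) select
 * the right 4KiB chunk inside that page.
 */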
789
790 /**
791  * amdgpu_vm_cpu_set_ptes - helper to update page tables via CPU
792  *
793  * @params: see amdgpu_pte_update_params definition
794  * @bo: PD/PT to update
795  * @pe: kmap addr of the page entry
796  * @addr: dst addr to write into pe
797  * @count: number of page entries to update
798  * @incr: increase next addr by incr bytes
799  * @flags: hw access flags
800  *
801  * Write count number of PT/PD entries directly.
802  */
803 static void amdgpu_vm_cpu_set_ptes(struct amdgpu_pte_update_params *params,
804                                    struct amdgpu_bo *bo,
805                                    uint64_t pe, uint64_t addr,
806                                    unsigned count, uint32_t incr,
807                                    uint64_t flags)
808 {
809         unsigned int i;
810         uint64_t value;
811
812         pe += (unsigned long)amdgpu_bo_kptr(bo);
813
814         trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags);
815
816         for (i = 0; i < count; i++) {
817                 value = params->pages_addr ?
818                         amdgpu_vm_map_gart(params->pages_addr, addr) :
819                         addr;
820                 amdgpu_gmc_set_pte_pde(params->adev, (void *)(uintptr_t)pe,
821                                        i, value, flags);
822                 addr += incr;
823         }
824 }
825
826 static int amdgpu_vm_wait_pd(struct amdgpu_device *adev, struct amdgpu_vm *vm,
827                              void *owner)
828 {
829         struct amdgpu_sync sync;
830         int r;
831
832         amdgpu_sync_create(&sync);
833         amdgpu_sync_resv(adev, &sync, vm->root.base.bo->tbo.resv, owner, false);
834         r = amdgpu_sync_wait(&sync, true);
835         amdgpu_sync_free(&sync);
836
837         return r;
838 }
839
840 /*
841  * amdgpu_vm_update_pde - update a single level in the hierarchy
842  *
843  * @params: parameters for the update
844  * @vm: requested vm
845  * @parent: parent directory
846  * @entry: entry to update
847  *
848  * Makes sure the requested entry in parent is up to date.
849  */
850 static void amdgpu_vm_update_pde(struct amdgpu_pte_update_params *params,
851                                  struct amdgpu_vm *vm,
852                                  struct amdgpu_vm_pt *parent,
853                                  struct amdgpu_vm_pt *entry)
854 {
855         struct amdgpu_bo *bo = parent->base.bo, *pbo;
856         uint64_t pde, pt, flags;
857         unsigned level;
858
859         /* Don't update huge pages here */
860         if (entry->huge)
861                 return;
862
863         for (level = 0, pbo = bo->parent; pbo; ++level)
864                 pbo = pbo->parent;
865
866         level += params->adev->vm_manager.root_level;
867         pt = amdgpu_bo_gpu_offset(entry->base.bo);
868         flags = AMDGPU_PTE_VALID;
869         amdgpu_gmc_get_vm_pde(params->adev, level, &pt, &flags);
870         pde = (entry - parent->entries) * 8;
871         if (bo->shadow)
872                 params->func(params, bo->shadow, pde, pt, 1, 0, flags);
873         params->func(params, bo, pde, pt, 1, 0, flags);
874 }
875
876 /*
877  * amdgpu_vm_invalidate_level - mark all PD levels as invalid
878  *
879  * @parent: parent PD
880  *
881  * Mark all PD levels as invalid after an error.
882  */
883 static void amdgpu_vm_invalidate_level(struct amdgpu_device *adev,
884                                        struct amdgpu_vm *vm,
885                                        struct amdgpu_vm_pt *parent,
886                                        unsigned level)
887 {
888         unsigned pt_idx, num_entries;
889
890         /*
891          * Recurse into the subdirectories. This recursion is harmless because
892          * we only have a maximum of 5 layers.
893          */
894         num_entries = amdgpu_vm_num_entries(adev, level);
895         for (pt_idx = 0; pt_idx < num_entries; ++pt_idx) {
896                 struct amdgpu_vm_pt *entry = &parent->entries[pt_idx];
897
898                 if (!entry->base.bo)
899                         continue;
900
901                 spin_lock(&vm->status_lock);
902                 if (list_empty(&entry->base.vm_status))
903                         list_add(&entry->base.vm_status, &vm->relocated);
904                 spin_unlock(&vm->status_lock);
905                 amdgpu_vm_invalidate_level(adev, vm, entry, level + 1);
906         }
907 }
908
909 /*
910  * amdgpu_vm_update_directories - make sure that all directories are valid
911  *
912  * @adev: amdgpu_device pointer
913  * @vm: requested vm
914  *
915  * Makes sure all directories are up to date.
916  * Returns 0 for success, error for failure.
917  */
918 int amdgpu_vm_update_directories(struct amdgpu_device *adev,
919                                  struct amdgpu_vm *vm)
920 {
921         struct amdgpu_pte_update_params params;
922         struct amdgpu_job *job;
923         unsigned ndw = 0;
924         int r = 0;
925
926         if (list_empty(&vm->relocated))
927                 return 0;
928
929 restart:
930         memset(&params, 0, sizeof(params));
931         params.adev = adev;
932
933         if (vm->use_cpu_for_update) {
934                 r = amdgpu_vm_wait_pd(adev, vm, AMDGPU_FENCE_OWNER_VM);
935                 if (unlikely(r))
936                         return r;
937
938                 params.func = amdgpu_vm_cpu_set_ptes;
939         } else {
940                 ndw = 512 * 8;
941                 r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
942                 if (r)
943                         return r;
944
945                 params.ib = &job->ibs[0];
946                 params.func = amdgpu_vm_do_set_ptes;
947         }
948
949         spin_lock(&vm->status_lock);
950         while (!list_empty(&vm->relocated)) {
951                 struct amdgpu_vm_bo_base *bo_base, *parent;
952                 struct amdgpu_vm_pt *pt, *entry;
953                 struct amdgpu_bo *bo;
954
955                 bo_base = list_first_entry(&vm->relocated,
956                                            struct amdgpu_vm_bo_base,
957                                            vm_status);
958                 list_del_init(&bo_base->vm_status);
959                 spin_unlock(&vm->status_lock);
960
961                 bo = bo_base->bo->parent;
962                 if (!bo) {
963                         spin_lock(&vm->status_lock);
964                         continue;
965                 }
966
967                 parent = list_first_entry(&bo->va, struct amdgpu_vm_bo_base,
968                                           bo_list);
969                 pt = container_of(parent, struct amdgpu_vm_pt, base);
970                 entry = container_of(bo_base, struct amdgpu_vm_pt, base);
971
972                 amdgpu_vm_update_pde(&params, vm, pt, entry);
973
974                 spin_lock(&vm->status_lock);
975                 if (!vm->use_cpu_for_update &&
976                     (ndw - params.ib->length_dw) < 32)
977                         break;
978         }
979         spin_unlock(&vm->status_lock);
980
981         if (vm->use_cpu_for_update) {
982                 /* Flush HDP */
983                 mb();
984                 amdgpu_asic_flush_hdp(adev, NULL);
985         } else if (params.ib->length_dw == 0) {
986                 amdgpu_job_free(job);
987         } else {
988                 struct amdgpu_bo *root = vm->root.base.bo;
989                 struct amdgpu_ring *ring;
990                 struct dma_fence *fence;
991
992                 ring = container_of(vm->entity.sched, struct amdgpu_ring,
993                                     sched);
994
995                 amdgpu_ring_pad_ib(ring, params.ib);
996                 amdgpu_sync_resv(adev, &job->sync, root->tbo.resv,
997                                  AMDGPU_FENCE_OWNER_VM, false);
998                 WARN_ON(params.ib->length_dw > ndw);
999                 r = amdgpu_job_submit(job, ring, &vm->entity,
1000                                       AMDGPU_FENCE_OWNER_VM, &fence);
1001                 if (r)
1002                         goto error;
1003
1004                 amdgpu_bo_fence(root, fence, true);
1005                 dma_fence_put(vm->last_update);
1006                 vm->last_update = fence;
1007         }
1008
1009         if (!list_empty(&vm->relocated))
1010                 goto restart;
1011
1012         return 0;
1013
1014 error:
1015         amdgpu_vm_invalidate_level(adev, vm, &vm->root,
1016                                    adev->vm_manager.root_level);
1017         amdgpu_job_free(job);
1018         return r;
1019 }
1020
1021 /**
1022  * amdgpu_vm_get_entry - find the entry for an address
1023  *
1024  * @p: see amdgpu_pte_update_params definition
1025  * @addr: virtual address in question
1026  * @entry: resulting entry or NULL
1027  * @parent: parent entry
1028  *
1029  * Find the vm_pt entry and its parent for the given address.
1030  */
1031 void amdgpu_vm_get_entry(struct amdgpu_pte_update_params *p, uint64_t addr,
1032                          struct amdgpu_vm_pt **entry,
1033                          struct amdgpu_vm_pt **parent)
1034 {
1035         unsigned level = p->adev->vm_manager.root_level;
1036
1037         *parent = NULL;
1038         *entry = &p->vm->root;
1039         while ((*entry)->entries) {
1040                 unsigned shift = amdgpu_vm_level_shift(p->adev, level++);
1041
1042                 *parent = *entry;
1043                 *entry = &(*entry)->entries[addr >> shift];
1044                 addr &= (1ULL << shift) - 1;
1045         }
1046
1047         if (level != AMDGPU_VM_PTB)
1048                 *entry = NULL;
1049 }
1050
1051 /**
1052  * amdgpu_vm_handle_huge_pages - handle updating the PD with huge pages
1053  *
1054  * @p: see amdgpu_pte_update_params definition
1055  * @entry: vm_pt entry to check
1056  * @parent: parent entry
1057  * @nptes: number of PTEs updated with this operation
1058  * @dst: destination address where the PTEs should point to
1059  * @flags: access flags for the PTEs
1060  *
1061  * Check if we can update the PD with a huge page.
1062  */
1063 static void amdgpu_vm_handle_huge_pages(struct amdgpu_pte_update_params *p,
1064                                         struct amdgpu_vm_pt *entry,
1065                                         struct amdgpu_vm_pt *parent,
1066                                         unsigned nptes, uint64_t dst,
1067                                         uint64_t flags)
1068 {
1069         uint64_t pde;
1070
1071         /* In the case of a mixed PT the PDE must point to it */
1072         if (p->adev->asic_type >= CHIP_VEGA10 && !p->src &&
1073             nptes == AMDGPU_VM_PTE_COUNT(p->adev)) {
1074                 /* Set the huge page flag to stop scanning at this PDE */
1075                 flags |= AMDGPU_PDE_PTE;
1076         }
1077
1078         if (!(flags & AMDGPU_PDE_PTE)) {
1079                 if (entry->huge) {
1080                         /* Add the entry to the relocated list to update it. */
1081                         entry->huge = false;
1082                         spin_lock(&p->vm->status_lock);
1083                         list_move(&entry->base.vm_status, &p->vm->relocated);
1084                         spin_unlock(&p->vm->status_lock);
1085                 }
1086                 return;
1087         }
1088
1089         entry->huge = true;
1090         amdgpu_gmc_get_vm_pde(p->adev, AMDGPU_VM_PDB0, &dst, &flags);
1091
1092         pde = (entry - parent->entries) * 8;
1093         if (parent->base.bo->shadow)
1094                 p->func(p, parent->base.bo->shadow, pde, dst, 1, 0, flags);
1095         p->func(p, parent->base.bo, pde, dst, 1, 0, flags);
1096 }
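/*
 * Illustrative example: with a 9-bit block_size AMDGPU_VM_PTE_COUNT is 512,
 * so an update covering 512 contiguous 4KiB pages (2MiB) lets the PDE itself
 * be marked with AMDGPU_PDE_PTE and point straight at the memory, skipping
 * the last page table level for that range.
 */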
1097
1098 /**
1099  * amdgpu_vm_update_ptes - make sure that page tables are valid
1100  *
1101  * @params: see amdgpu_pte_update_params definition
1102  * @vm: requested vm
1103  * @start: start of GPU address range
1104  * @end: end of GPU address range
1105  * @dst: destination address to map to, the next dst inside the function
1106  * @flags: mapping flags
1107  *
1108  * Update the page tables in the range @start - @end.
1109  * Returns 0 for success, -EINVAL for failure.
1110  */
1111 static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
1112                                   uint64_t start, uint64_t end,
1113                                   uint64_t dst, uint64_t flags)
1114 {
1115         struct amdgpu_device *adev = params->adev;
1116         const uint64_t mask = AMDGPU_VM_PTE_COUNT(adev) - 1;
1117
1118         uint64_t addr, pe_start;
1119         struct amdgpu_bo *pt;
1120         unsigned nptes;
1121
1122         /* walk over the address space and update the page tables */
1123         for (addr = start; addr < end; addr += nptes,
1124              dst += nptes * AMDGPU_GPU_PAGE_SIZE) {
1125                 struct amdgpu_vm_pt *entry, *parent;
1126
1127                 amdgpu_vm_get_entry(params, addr, &entry, &parent);
1128                 if (!entry)
1129                         return -ENOENT;
1130
1131                 if ((addr & ~mask) == (end & ~mask))
1132                         nptes = end - addr;
1133                 else
1134                         nptes = AMDGPU_VM_PTE_COUNT(adev) - (addr & mask);
1135
1136                 amdgpu_vm_handle_huge_pages(params, entry, parent,
1137                                             nptes, dst, flags);
1138                 /* We don't need to update PTEs for huge pages */
1139                 if (entry->huge)
1140                         continue;
1141
1142                 pt = entry->base.bo;
1143                 pe_start = (addr & mask) * 8;
1144                 if (pt->shadow)
1145                         params->func(params, pt->shadow, pe_start, dst, nptes,
1146                                      AMDGPU_GPU_PAGE_SIZE, flags);
1147                 params->func(params, pt, pe_start, dst, nptes,
1148                              AMDGPU_GPU_PAGE_SIZE, flags);
1149         }
1150
1151         return 0;
1152 }
1153
1154 /*
1155  * amdgpu_vm_frag_ptes - add fragment information to PTEs
1156  *
1157  * @params: see amdgpu_pte_update_params definition
1158  * @vm: requested vm
1159  * @start: first PTE to handle
1160  * @end: last PTE to handle
1161  * @dst: addr those PTEs should point to
1162  * @flags: hw mapping flags
1163  * Returns 0 for success, -EINVAL for failure.
1164  */
1165 static int amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params  *params,
1166                                 uint64_t start, uint64_t end,
1167                                 uint64_t dst, uint64_t flags)
1168 {
1169         /**
1170          * The MC L1 TLB supports variable sized pages, based on a fragment
1171          * field in the PTE. When this field is set to a non-zero value, page
1172          * granularity is increased from 4KB to (1 << (12 + frag)). The PTE
1173          * flags are considered valid for all PTEs within the fragment range
1174          * and corresponding mappings are assumed to be physically contiguous.
1175          *
1176          * The L1 TLB can store a single PTE for the whole fragment,
1177          * significantly increasing the space available for translation
1178          * caching. This leads to large improvements in throughput when the
1179          * TLB is under pressure.
1180          *
1181          * The L2 TLB distributes small and large fragments into two
1182          * asymmetric partitions. The large fragment cache is significantly
1183          * larger. Thus, we try to use large fragments wherever possible.
1184          * Userspace can support this by aligning virtual base address and
1185          * allocation size to the fragment size.
1186          */
1187         unsigned max_frag = params->adev->vm_manager.fragment_size;
1188         int r;
1189
1190         /* system pages are non-contiguous */
1191         if (params->src || !(flags & AMDGPU_PTE_VALID))
1192                 return amdgpu_vm_update_ptes(params, start, end, dst, flags);
1193
1194         while (start != end) {
1195                 uint64_t frag_flags, frag_end;
1196                 unsigned frag;
1197
1198                 /* This intentionally wraps around if no bit is set */
1199                 frag = min((unsigned)ffs(start) - 1,
1200                            (unsigned)fls64(end - start) - 1);
1201                 if (frag >= max_frag) {
1202                         frag_flags = AMDGPU_PTE_FRAG(max_frag);
1203                         frag_end = end & ~((1ULL << max_frag) - 1);
1204                 } else {
1205                         frag_flags = AMDGPU_PTE_FRAG(frag);
1206                         frag_end = start + (1 << frag);
1207                 }
1208
1209                 r = amdgpu_vm_update_ptes(params, start, frag_end, dst,
1210                                           flags | frag_flags);
1211                 if (r)
1212                         return r;
1213
1214                 dst += (frag_end - start) * AMDGPU_GPU_PAGE_SIZE;
1215                 start = frag_end;
1216         }
1217
1218         return 0;
1219 }
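/*
 * Worked example for the fragment computation above (illustrative numbers):
 * for start = 2048 and end = 2304 (GPU page numbers), ffs(2048) - 1 = 11 and
 * fls64(256) - 1 = 8, so frag = 8 and the whole 256-page range is emitted in
 * a single step with AMDGPU_PTE_FRAG(8).
 */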
1220
1221 /**
1222  * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
1223  *
1224  * @adev: amdgpu_device pointer
1225  * @exclusive: fence we need to sync to
1226  * @pages_addr: DMA addresses to use for mapping
1227  * @vm: requested vm
1228  * @start: start of mapped range
1229  * @last: last mapped entry
1230  * @flags: flags for the entries
1231  * @addr: addr to set the area to
1232  * @fence: optional resulting fence
1233  *
1234  * Fill in the page table entries between @start and @last.
1235  * Returns 0 for success, -EINVAL for failure.
1236  */
1237 static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
1238                                        struct dma_fence *exclusive,
1239                                        dma_addr_t *pages_addr,
1240                                        struct amdgpu_vm *vm,
1241                                        uint64_t start, uint64_t last,
1242                                        uint64_t flags, uint64_t addr,
1243                                        struct dma_fence **fence)
1244 {
1245         struct amdgpu_ring *ring;
1246         void *owner = AMDGPU_FENCE_OWNER_VM;
1247         unsigned nptes, ncmds, ndw;
1248         struct amdgpu_job *job;
1249         struct amdgpu_pte_update_params params;
1250         struct dma_fence *f = NULL;
1251         int r;
1252
1253         memset(&params, 0, sizeof(params));
1254         params.adev = adev;
1255         params.vm = vm;
1256
1257         /* sync to everything on unmapping */
1258         if (!(flags & AMDGPU_PTE_VALID))
1259                 owner = AMDGPU_FENCE_OWNER_UNDEFINED;
1260
1261         if (vm->use_cpu_for_update) {
1262                 /* params.src is used as a flag to indicate system memory */
1263                 if (pages_addr)
1264                         params.src = ~0;
1265
1266                 /* Wait for PT BOs to be free. PTs share the same resv. object
1267                  * as the root PD BO
1268                  */
1269                 r = amdgpu_vm_wait_pd(adev, vm, owner);
1270                 if (unlikely(r))
1271                         return r;
1272
1273                 params.func = amdgpu_vm_cpu_set_ptes;
1274                 params.pages_addr = pages_addr;
1275                 return amdgpu_vm_frag_ptes(&params, start, last + 1,
1276                                            addr, flags);
1277         }
1278
1279         ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
1280
1281         nptes = last - start + 1;
1282
1283         /*
1284          * reserve space for two commands every (1 << BLOCK_SIZE)
1285          *  entries or 2k dwords (whatever is smaller)
1286          *
1287          * The second command is for the shadow pagetables.
1288          */
1289         if (vm->root.base.bo->shadow)
1290                 ncmds = ((nptes >> min(adev->vm_manager.block_size, 11u)) + 1) * 2;
1291         else
1292                 ncmds = ((nptes >> min(adev->vm_manager.block_size, 11u)) + 1);
1293
1294         /* padding, etc. */
1295         ndw = 64;
1296
1297         if (pages_addr) {
1298                 /* copy commands needed */
1299                 ndw += ncmds * adev->vm_manager.vm_pte_funcs->copy_pte_num_dw;
1300
1301                 /* and also PTEs */
1302                 ndw += nptes * 2;
1303
1304                 params.func = amdgpu_vm_do_copy_ptes;
1305
1306         } else {
1307                 /* set page commands needed */
1308                 ndw += ncmds * 10;
1309
1310                 /* extra commands for begin/end fragments */
1311                 ndw += 2 * 10 * adev->vm_manager.fragment_size;
1312
1313                 params.func = amdgpu_vm_do_set_ptes;
1314         }
1315
1316         r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
1317         if (r)
1318                 return r;
1319
1320         params.ib = &job->ibs[0];
1321
1322         if (pages_addr) {
1323                 uint64_t *pte;
1324                 unsigned i;
1325
1326                 /* Put the PTEs at the end of the IB. */
1327                 i = ndw - nptes * 2;
1328                 pte = (uint64_t *)&(job->ibs->ptr[i]);
1329                 params.src = job->ibs->gpu_addr + i * 4;
1330
1331                 for (i = 0; i < nptes; ++i) {
1332                         pte[i] = amdgpu_vm_map_gart(pages_addr, addr + i *
1333                                                     AMDGPU_GPU_PAGE_SIZE);
1334                         pte[i] |= flags;
1335                 }
1336                 addr = 0;
1337         }
1338
1339         r = amdgpu_sync_fence(adev, &job->sync, exclusive, false);
1340         if (r)
1341                 goto error_free;
1342
1343         r = amdgpu_sync_resv(adev, &job->sync, vm->root.base.bo->tbo.resv,
1344                              owner, false);
1345         if (r)
1346                 goto error_free;
1347
1348         r = reservation_object_reserve_shared(vm->root.base.bo->tbo.resv);
1349         if (r)
1350                 goto error_free;
1351
1352         r = amdgpu_vm_frag_ptes(&params, start, last + 1, addr, flags);
1353         if (r)
1354                 goto error_free;
1355
1356         amdgpu_ring_pad_ib(ring, params.ib);
1357         WARN_ON(params.ib->length_dw > ndw);
1358         r = amdgpu_job_submit(job, ring, &vm->entity,
1359                               AMDGPU_FENCE_OWNER_VM, &f);
1360         if (r)
1361                 goto error_free;
1362
1363         amdgpu_bo_fence(vm->root.base.bo, f, true);
1364         dma_fence_put(*fence);
1365         *fence = f;
1366         return 0;
1367
1368 error_free:
1369         amdgpu_job_free(job);
1370         return r;
1371 }
1372
1373 /**
1374  * amdgpu_vm_bo_split_mapping - split a mapping into smaller chunks
1375  *
1376  * @adev: amdgpu_device pointer
1377  * @exclusive: fence we need to sync to
1378  * @pages_addr: DMA addresses to use for mapping
1379  * @vm: requested vm
1380  * @mapping: mapped range and flags to use for the update
1381  * @flags: HW flags for the mapping
1382  * @nodes: array of drm_mm_nodes with the MC addresses
1383  * @fence: optional resulting fence
1384  *
1385  * Split the mapping into smaller chunks so that each update fits
1386  * into a SDMA IB.
1387  * Returns 0 for success, -EINVAL for failure.
1388  */
1389 static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
1390                                       struct dma_fence *exclusive,
1391                                       dma_addr_t *pages_addr,
1392                                       struct amdgpu_vm *vm,
1393                                       struct amdgpu_bo_va_mapping *mapping,
1394                                       uint64_t flags,
1395                                       struct drm_mm_node *nodes,
1396                                       struct dma_fence **fence)
1397 {
1398         unsigned min_linear_pages = 1 << adev->vm_manager.fragment_size;
1399         uint64_t pfn, start = mapping->start;
1400         int r;
1401
1402         /* Normally, bo_va->flags only contains the READABLE and WRITEABLE bits,
1403          * but just in case we filter the flags here first.
1404          */
1405         if (!(mapping->flags & AMDGPU_PTE_READABLE))
1406                 flags &= ~AMDGPU_PTE_READABLE;
1407         if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
1408                 flags &= ~AMDGPU_PTE_WRITEABLE;
1409
1410         flags &= ~AMDGPU_PTE_EXECUTABLE;
1411         flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
1412
1413         flags &= ~AMDGPU_PTE_MTYPE_MASK;
1414         flags |= (mapping->flags & AMDGPU_PTE_MTYPE_MASK);
1415
1416         if ((mapping->flags & AMDGPU_PTE_PRT) &&
1417             (adev->asic_type >= CHIP_VEGA10)) {
1418                 flags |= AMDGPU_PTE_PRT;
1419                 flags &= ~AMDGPU_PTE_VALID;
1420         }
1421
1422         trace_amdgpu_vm_bo_update(mapping);
1423
1424         pfn = mapping->offset >> PAGE_SHIFT;
1425         if (nodes) {
1426                 while (pfn >= nodes->size) {
1427                         pfn -= nodes->size;
1428                         ++nodes;
1429                 }
1430         }
1431
1432         do {
1433                 dma_addr_t *dma_addr = NULL;
1434                 uint64_t max_entries;
1435                 uint64_t addr, last;
1436
1437                 if (nodes) {
1438                         addr = nodes->start << PAGE_SHIFT;
1439                         max_entries = (nodes->size - pfn) *
1440                                 (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
1441                 } else {
1442                         addr = 0;
1443                         max_entries = S64_MAX;
1444                 }
1445
1446                 if (pages_addr) {
1447                         uint64_t count;
1448
1449                         max_entries = min(max_entries, 16ull * 1024ull);
1450                         for (count = 1; count < max_entries; ++count) {
1451                                 uint64_t idx = pfn + count;
1452
1453                                 if (pages_addr[idx] !=
1454                                     (pages_addr[idx - 1] + PAGE_SIZE))
1455                                         break;
1456                         }
1457
1458                         if (count < min_linear_pages) {
1459                                 addr = pfn << PAGE_SHIFT;
1460                                 dma_addr = pages_addr;
1461                         } else {
1462                                 addr = pages_addr[pfn];
1463                                 max_entries = count;
1464                         }
1465
1466                 } else if (flags & AMDGPU_PTE_VALID) {
1467                         addr += adev->vm_manager.vram_base_offset;
1468                         addr += pfn << PAGE_SHIFT;
1469                 }
1470
1471                 last = min((uint64_t)mapping->last, start + max_entries - 1);
1472                 r = amdgpu_vm_bo_update_mapping(adev, exclusive, dma_addr, vm,
1473                                                 start, last, flags, addr,
1474                                                 fence);
1475                 if (r)
1476                         return r;
1477
1478                 pfn += last - start + 1;
1479                 if (nodes && nodes->size == pfn) {
1480                         pfn = 0;
1481                         ++nodes;
1482                 }
1483                 start = last + 1;
1484
1485         } while (unlikely(start != mapping->last + 1));
1486
1487         return 0;
1488 }
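
/*
 * Editor's sketch (not part of the driver): a minimal, stand-alone
 * illustration of the contiguity scan performed by the system-memory branch
 * above.  Starting at @pfn it counts how many DMA addresses form one
 * physically contiguous run; the driver uses that count to choose between
 * per-page updates and a single linear update.  The function name is
 * illustrative only.
 */
static uint64_t example_count_contiguous(const dma_addr_t *pages_addr,
                                         uint64_t pfn, uint64_t max_entries)
{
        uint64_t count;

        for (count = 1; count < max_entries; ++count) {
                uint64_t idx = pfn + count;

                /* Stop as soon as the next page is not adjacent in DMA space */
                if (pages_addr[idx] != (pages_addr[idx - 1] + PAGE_SIZE))
                        break;
        }
        return count;
}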
1489
1490 /**
1491  * amdgpu_vm_bo_update - update all BO mappings in the vm page table
1492  *
1493  * @adev: amdgpu_device pointer
1494  * @bo_va: requested BO and VM object
1495  * @clear: if true clear the entries
1496  *
1497  * Fill in the page table entries for @bo_va.
1498  * Returns 0 for success, -EINVAL for failure.
1499  */
1500 int amdgpu_vm_bo_update(struct amdgpu_device *adev,
1501                         struct amdgpu_bo_va *bo_va,
1502                         bool clear)
1503 {
1504         struct amdgpu_bo *bo = bo_va->base.bo;
1505         struct amdgpu_vm *vm = bo_va->base.vm;
1506         struct amdgpu_bo_va_mapping *mapping;
1507         dma_addr_t *pages_addr = NULL;
1508         struct ttm_mem_reg *mem;
1509         struct drm_mm_node *nodes;
1510         struct dma_fence *exclusive, **last_update;
1511         uint64_t flags;
1512         int r;
1513
1514         if (clear || !bo_va->base.bo) {
1515                 mem = NULL;
1516                 nodes = NULL;
1517                 exclusive = NULL;
1518         } else {
1519                 struct ttm_dma_tt *ttm;
1520
1521                 mem = &bo_va->base.bo->tbo.mem;
1522                 nodes = mem->mm_node;
1523                 if (mem->mem_type == TTM_PL_TT) {
1524                         ttm = container_of(bo_va->base.bo->tbo.ttm,
1525                                            struct ttm_dma_tt, ttm);
1526                         pages_addr = ttm->dma_address;
1527                 }
1528                 exclusive = reservation_object_get_excl(bo->tbo.resv);
1529         }
1530
1531         if (bo)
1532                 flags = amdgpu_ttm_tt_pte_flags(adev, bo->tbo.ttm, mem);
1533         else
1534                 flags = 0x0;
1535
1536         if (clear || (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv))
1537                 last_update = &vm->last_update;
1538         else
1539                 last_update = &bo_va->last_pt_update;
1540
1541         if (!clear && bo_va->base.moved) {
1542                 bo_va->base.moved = false;
1543                 list_splice_init(&bo_va->valids, &bo_va->invalids);
1544
1545         } else if (bo_va->cleared != clear) {
1546                 list_splice_init(&bo_va->valids, &bo_va->invalids);
1547         }
1548
1549         list_for_each_entry(mapping, &bo_va->invalids, list) {
1550                 r = amdgpu_vm_bo_split_mapping(adev, exclusive, pages_addr, vm,
1551                                                mapping, flags, nodes,
1552                                                last_update);
1553                 if (r)
1554                         return r;
1555         }
1556
1557         if (vm->use_cpu_for_update) {
1558                 /* Flush HDP */
1559                 mb();
1560                 amdgpu_asic_flush_hdp(adev, NULL);
1561         }
1562
1563         spin_lock(&vm->status_lock);
1564         if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv) {
1565                 unsigned mem_type = bo->tbo.mem.mem_type;
1566
1567                 /* If the BO is not in its preferred location add it back to
1568                  * the evicted list so that it gets validated again on the
1569                  * next command submission.
1570                  */
1571                 if (!(bo->preferred_domains & amdgpu_mem_type_to_domain(mem_type)))
1572                         list_add_tail(&bo_va->base.vm_status, &vm->evicted);
1573                 else
1574                         list_del_init(&bo_va->base.vm_status);
1575         } else {
1576                 list_del_init(&bo_va->base.vm_status);
1577         }
1578         spin_unlock(&vm->status_lock);
1579
1580         list_splice_init(&bo_va->invalids, &bo_va->valids);
1581         bo_va->cleared = clear;
1582
1583         if (trace_amdgpu_vm_bo_mapping_enabled()) {
1584                 list_for_each_entry(mapping, &bo_va->valids, list)
1585                         trace_amdgpu_vm_bo_mapping(mapping);
1586         }
1587
1588         return 0;
1589 }
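
/*
 * Editor's sketch: a typical caller pattern, loosely modeled on the command
 * submission path.  The BO (or the root PD for per-VM BOs) must already be
 * reserved; "clear" is false because we want real mappings written, not
 * cleared PTEs.  Name and error handling are illustrative only.
 */
static int example_update_bo_va(struct amdgpu_device *adev,
                                struct amdgpu_bo_va *bo_va)
{
        int r;

        r = amdgpu_vm_bo_update(adev, bo_va, false);
        if (r)
                return r;

        /* Callers usually add bo_va->last_pt_update to their job's sync
         * object so the IB waits for the page table update to finish.
         */
        return 0;
}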
1590
1591 /**
1592  * amdgpu_vm_update_prt_state - update the global PRT state
1593  */
1594 static void amdgpu_vm_update_prt_state(struct amdgpu_device *adev)
1595 {
1596         unsigned long flags;
1597         bool enable;
1598
1599         spin_lock_irqsave(&adev->vm_manager.prt_lock, flags);
1600         enable = !!atomic_read(&adev->vm_manager.num_prt_users);
1601         adev->gmc.gmc_funcs->set_prt(adev, enable);
1602         spin_unlock_irqrestore(&adev->vm_manager.prt_lock, flags);
1603 }
1604
1605 /**
1606  * amdgpu_vm_prt_get - add a PRT user
1607  */
1608 static void amdgpu_vm_prt_get(struct amdgpu_device *adev)
1609 {
1610         if (!adev->gmc.gmc_funcs->set_prt)
1611                 return;
1612
1613         if (atomic_inc_return(&adev->vm_manager.num_prt_users) == 1)
1614                 amdgpu_vm_update_prt_state(adev);
1615 }
1616
1617 /**
1618  * amdgpu_vm_prt_put - drop a PRT user
1619  */
1620 static void amdgpu_vm_prt_put(struct amdgpu_device *adev)
1621 {
1622         if (atomic_dec_return(&adev->vm_manager.num_prt_users) == 0)
1623                 amdgpu_vm_update_prt_state(adev);
1624 }
1625
1626 /**
1627  * amdgpu_vm_prt_cb - callback for updating the PRT status
1628  */
1629 static void amdgpu_vm_prt_cb(struct dma_fence *fence, struct dma_fence_cb *_cb)
1630 {
1631         struct amdgpu_prt_cb *cb = container_of(_cb, struct amdgpu_prt_cb, cb);
1632
1633         amdgpu_vm_prt_put(cb->adev);
1634         kfree(cb);
1635 }
1636
1637 /**
1638  * amdgpu_vm_add_prt_cb - add callback for updating the PRT status
1639  */
1640 static void amdgpu_vm_add_prt_cb(struct amdgpu_device *adev,
1641                                  struct dma_fence *fence)
1642 {
1643         struct amdgpu_prt_cb *cb;
1644
1645         if (!adev->gmc.gmc_funcs->set_prt)
1646                 return;
1647
1648         cb = kmalloc(sizeof(struct amdgpu_prt_cb), GFP_KERNEL);
1649         if (!cb) {
1650                 /* Last resort when we are OOM */
1651                 if (fence)
1652                         dma_fence_wait(fence, false);
1653
1654                 amdgpu_vm_prt_put(adev);
1655         } else {
1656                 cb->adev = adev;
1657                 if (!fence || dma_fence_add_callback(fence, &cb->cb,
1658                                                      amdgpu_vm_prt_cb))
1659                         amdgpu_vm_prt_cb(fence, &cb->cb);
1660         }
1661 }
1662
1663 /**
1664  * amdgpu_vm_free_mapping - free a mapping
1665  *
1666  * @adev: amdgpu_device pointer
1667  * @vm: requested vm
1668  * @mapping: mapping to be freed
1669  * @fence: fence of the unmap operation
1670  *
1671  * Free a mapping and make sure we decrease the PRT usage count if applicable.
1672  */
1673 static void amdgpu_vm_free_mapping(struct amdgpu_device *adev,
1674                                    struct amdgpu_vm *vm,
1675                                    struct amdgpu_bo_va_mapping *mapping,
1676                                    struct dma_fence *fence)
1677 {
1678         if (mapping->flags & AMDGPU_PTE_PRT)
1679                 amdgpu_vm_add_prt_cb(adev, fence);
1680         kfree(mapping);
1681 }
1682
1683 /**
1684  * amdgpu_vm_prt_fini - finish all prt mappings
1685  *
1686  * @adev: amdgpu_device pointer
1687  * @vm: requested vm
1688  *
1689  * Register a cleanup callback to disable PRT support after VM dies.
1690  */
1691 static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
1692 {
1693         struct reservation_object *resv = vm->root.base.bo->tbo.resv;
1694         struct dma_fence *excl, **shared;
1695         unsigned i, shared_count;
1696         int r;
1697
1698         r = reservation_object_get_fences_rcu(resv, &excl,
1699                                               &shared_count, &shared);
1700         if (r) {
1701                 /* Not enough memory to grab the fence list, as last resort
1702                  * block for all the fences to complete.
1703                  */
1704                 reservation_object_wait_timeout_rcu(resv, true, false,
1705                                                     MAX_SCHEDULE_TIMEOUT);
1706                 return;
1707         }
1708
1709         /* Add a callback for each fence in the reservation object */
1710         amdgpu_vm_prt_get(adev);
1711         amdgpu_vm_add_prt_cb(adev, excl);
1712
1713         for (i = 0; i < shared_count; ++i) {
1714                 amdgpu_vm_prt_get(adev);
1715                 amdgpu_vm_add_prt_cb(adev, shared[i]);
1716         }
1717
1718         kfree(shared);
1719 }
1720
1721 /**
1722  * amdgpu_vm_clear_freed - clear freed BOs in the PT
1723  *
1724  * @adev: amdgpu_device pointer
1725  * @vm: requested vm
1726  * @fence: optional resulting fence (unchanged if no work needed to be done
1727  * or if an error occurred)
1728  *
1729  * Make sure all freed BOs are cleared in the PT.
1730  * Returns 0 for success.
1731  *
1732  * PTs have to be reserved and mutex must be locked!
1733  */
1734 int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
1735                           struct amdgpu_vm *vm,
1736                           struct dma_fence **fence)
1737 {
1738         struct amdgpu_bo_va_mapping *mapping;
1739         uint64_t init_pte_value = 0;
1740         struct dma_fence *f = NULL;
1741         int r;
1742
1743         while (!list_empty(&vm->freed)) {
1744                 mapping = list_first_entry(&vm->freed,
1745                         struct amdgpu_bo_va_mapping, list);
1746                 list_del(&mapping->list);
1747
1748                 if (vm->pte_support_ats && mapping->start < AMDGPU_VA_HOLE_START)
1749                         init_pte_value = AMDGPU_PTE_DEFAULT_ATC;
1750
1751                 r = amdgpu_vm_bo_update_mapping(adev, NULL, NULL, vm,
1752                                                 mapping->start, mapping->last,
1753                                                 init_pte_value, 0, &f);
1754                 amdgpu_vm_free_mapping(adev, vm, mapping, f);
1755                 if (r) {
1756                         dma_fence_put(f);
1757                         return r;
1758                 }
1759         }
1760
1761         if (fence && f) {
1762                 dma_fence_put(*fence);
1763                 *fence = f;
1764         } else {
1765                 dma_fence_put(f);
1766         }
1767
1768         return 0;
1769
1770 }
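
/*
 * Editor's sketch: how an unmap path might pair amdgpu_vm_bo_unmap() with
 * amdgpu_vm_clear_freed(), roughly following the GEM VA update flow.  It
 * assumes the page tables and the BO are already reserved; the helper name
 * is illustrative only.
 */
static int example_unmap_and_clear(struct amdgpu_device *adev,
                                   struct amdgpu_vm *vm,
                                   struct amdgpu_bo_va *bo_va,
                                   uint64_t va)
{
        struct dma_fence *fence = NULL;
        int r;

        /* Moves the mapping onto vm->freed (or frees it directly) */
        r = amdgpu_vm_bo_unmap(adev, bo_va, va);
        if (r)
                return r;

        /* Actually write the cleared PTEs for everything on vm->freed */
        r = amdgpu_vm_clear_freed(adev, vm, &fence);
        if (r)
                return r;

        dma_fence_put(fence);
        return 0;
}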
1771
1772 /**
1773  * amdgpu_vm_handle_moved - handle moved BOs in the PT
1774  *
1775  * @adev: amdgpu_device pointer
1776  * @vm: requested vm
1778  *
1779  * Make sure all BOs which are moved are updated in the PTs.
1780  * Returns 0 for success.
1781  *
1782  * PTs have to be reserved!
1783  */
1784 int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
1785                            struct amdgpu_vm *vm)
1786 {
1787         bool clear;
1788         int r = 0;
1789
1790         spin_lock(&vm->status_lock);
1791         while (!list_empty(&vm->moved)) {
1792                 struct amdgpu_bo_va *bo_va;
1793                 struct reservation_object *resv;
1794
1795                 bo_va = list_first_entry(&vm->moved,
1796                         struct amdgpu_bo_va, base.vm_status);
1797                 spin_unlock(&vm->status_lock);
1798
1799                 resv = bo_va->base.bo->tbo.resv;
1800
1801                 /* Per VM BOs never need to be cleared in the page tables */
1802                 if (resv == vm->root.base.bo->tbo.resv)
1803                         clear = false;
1804                 /* Try to reserve the BO to avoid clearing its ptes */
1805                 else if (!amdgpu_vm_debug && reservation_object_trylock(resv))
1806                         clear = false;
1807                 /* Somebody else is using the BO right now */
1808                 else
1809                         clear = true;
1810
1811                 r = amdgpu_vm_bo_update(adev, bo_va, clear);
1812                 if (r)
1813                         return r;
1814
1815                 if (!clear && resv != vm->root.base.bo->tbo.resv)
1816                         reservation_object_unlock(resv);
1817
1818                 spin_lock(&vm->status_lock);
1819         }
1820         spin_unlock(&vm->status_lock);
1821
1822         return r;
1823 }
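
/*
 * Editor's sketch: the order in which the two housekeeping helpers above are
 * typically driven before a command submission, with everything relevant
 * reserved by the caller.  Purely illustrative.
 */
static int example_flush_vm_updates(struct amdgpu_device *adev,
                                    struct amdgpu_vm *vm)
{
        int r;

        /* First clear the PTEs of mappings that were removed ... */
        r = amdgpu_vm_clear_freed(adev, vm, NULL);
        if (r)
                return r;

        /* ... then bring the PTs up to date for BOs that moved */
        return amdgpu_vm_handle_moved(adev, vm);
}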
1824
1825 /**
1826  * amdgpu_vm_bo_add - add a bo to a specific vm
1827  *
1828  * @adev: amdgpu_device pointer
1829  * @vm: requested vm
1830  * @bo: amdgpu buffer object
1831  *
1832  * Add @bo into the requested vm.
1833  * Add @bo to the list of bos associated with the vm
1834  * Returns newly added bo_va or NULL for failure
1835  *
1836  * Object has to be reserved!
1837  */
1838 struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
1839                                       struct amdgpu_vm *vm,
1840                                       struct amdgpu_bo *bo)
1841 {
1842         struct amdgpu_bo_va *bo_va;
1843
1844         bo_va = kzalloc(sizeof(struct amdgpu_bo_va), GFP_KERNEL);
1845         if (bo_va == NULL) {
1846                 return NULL;
1847         }
1848         bo_va->base.vm = vm;
1849         bo_va->base.bo = bo;
1850         INIT_LIST_HEAD(&bo_va->base.bo_list);
1851         INIT_LIST_HEAD(&bo_va->base.vm_status);
1852
1853         bo_va->ref_count = 1;
1854         INIT_LIST_HEAD(&bo_va->valids);
1855         INIT_LIST_HEAD(&bo_va->invalids);
1856
1857         if (!bo)
1858                 return bo_va;
1859
1860         list_add_tail(&bo_va->base.bo_list, &bo->va);
1861
1862         if (bo->tbo.resv != vm->root.base.bo->tbo.resv)
1863                 return bo_va;
1864
1865         if (bo->preferred_domains &
1866             amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type))
1867                 return bo_va;
1868
1869         /*
1870          * We checked all the prerequisites, but it looks like this per VM BO
1871          * is currently evicted. Add the BO to the evicted list to make sure it
1872          * is validated on the next VM use to avoid faults.
1873          */
1874         spin_lock(&vm->status_lock);
1875         list_move_tail(&bo_va->base.vm_status, &vm->evicted);
1876         spin_unlock(&vm->status_lock);
1877
1878         return bo_va;
1879 }
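
/*
 * Editor's sketch: combining amdgpu_vm_bo_add() with amdgpu_vm_bo_map() the
 * way a GEM open/VA-map path might.  The BO must be reserved around both
 * calls; the VA and size must be GPU-page aligned or the map call returns
 * -EINVAL.  Names and flags chosen here are illustrative only.
 */
static struct amdgpu_bo_va *example_add_and_map(struct amdgpu_device *adev,
                                                struct amdgpu_vm *vm,
                                                struct amdgpu_bo *bo,
                                                uint64_t va)
{
        struct amdgpu_bo_va *bo_va;
        int r;

        if (amdgpu_bo_reserve(bo, true))
                return NULL;

        bo_va = amdgpu_vm_bo_add(adev, vm, bo);
        if (bo_va) {
                r = amdgpu_vm_bo_map(adev, bo_va, va, 0, amdgpu_bo_size(bo),
                                     AMDGPU_PTE_READABLE |
                                     AMDGPU_PTE_WRITEABLE);
                if (r) {
                        amdgpu_vm_bo_rmv(adev, bo_va);
                        bo_va = NULL;
                }
        }

        amdgpu_bo_unreserve(bo);
        return bo_va;
}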
1880
1881
1882 /**
1883  * amdgpu_vm_bo_insert_mapping - insert a new mapping
1884  *
1885  * @adev: amdgpu_device pointer
1886  * @bo_va: bo_va to store the address
1887  * @mapping: the mapping to insert
1888  *
1889  * Insert a new mapping into all structures.
1890  */
1891 static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
1892                                     struct amdgpu_bo_va *bo_va,
1893                                     struct amdgpu_bo_va_mapping *mapping)
1894 {
1895         struct amdgpu_vm *vm = bo_va->base.vm;
1896         struct amdgpu_bo *bo = bo_va->base.bo;
1897
1898         mapping->bo_va = bo_va;
1899         list_add(&mapping->list, &bo_va->invalids);
1900         amdgpu_vm_it_insert(mapping, &vm->va);
1901
1902         if (mapping->flags & AMDGPU_PTE_PRT)
1903                 amdgpu_vm_prt_get(adev);
1904
1905         if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv) {
1906                 spin_lock(&vm->status_lock);
1907                 if (list_empty(&bo_va->base.vm_status))
1908                         list_add(&bo_va->base.vm_status, &vm->moved);
1909                 spin_unlock(&vm->status_lock);
1910         }
1911         trace_amdgpu_vm_bo_map(bo_va, mapping);
1912 }
1913
1914 /**
1915  * amdgpu_vm_bo_map - map bo inside a vm
1916  *
1917  * @adev: amdgpu_device pointer
1918  * @bo_va: bo_va to store the address
1919  * @saddr: where to map the BO
1920  * @offset: requested offset in the BO
1921  * @flags: attributes of pages (read/write/valid/etc.)
1922  *
1923  * Add a mapping of the BO at the specified addr into the VM.
1924  * Returns 0 for success, error for failure.
1925  *
1926  * Object has to be reserved and unreserved outside!
1927  */
1928 int amdgpu_vm_bo_map(struct amdgpu_device *adev,
1929                      struct amdgpu_bo_va *bo_va,
1930                      uint64_t saddr, uint64_t offset,
1931                      uint64_t size, uint64_t flags)
1932 {
1933         struct amdgpu_bo_va_mapping *mapping, *tmp;
1934         struct amdgpu_bo *bo = bo_va->base.bo;
1935         struct amdgpu_vm *vm = bo_va->base.vm;
1936         uint64_t eaddr;
1937
1938         /* validate the parameters */
1939         if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
1940             size == 0 || size & AMDGPU_GPU_PAGE_MASK)
1941                 return -EINVAL;
1942
1943         /* make sure object fit at this offset */
1944         eaddr = saddr + size - 1;
1945         if (saddr >= eaddr ||
1946             (bo && offset + size > amdgpu_bo_size(bo)))
1947                 return -EINVAL;
1948
1949         saddr /= AMDGPU_GPU_PAGE_SIZE;
1950         eaddr /= AMDGPU_GPU_PAGE_SIZE;
1951
1952         tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
1953         if (tmp) {
1954                 /* bo and tmp overlap, invalid addr */
1955                 dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
1956                         "0x%010Lx-0x%010Lx\n", bo, saddr, eaddr,
1957                         tmp->start, tmp->last + 1);
1958                 return -EINVAL;
1959         }
1960
1961         mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
1962         if (!mapping)
1963                 return -ENOMEM;
1964
1965         mapping->start = saddr;
1966         mapping->last = eaddr;
1967         mapping->offset = offset;
1968         mapping->flags = flags;
1969
1970         amdgpu_vm_bo_insert_map(adev, bo_va, mapping);
1971
1972         return 0;
1973 }
1974
1975 /**
1976  * amdgpu_vm_bo_replace_map - map bo inside a vm, replacing existing mappings
1977  *
1978  * @adev: amdgpu_device pointer
1979  * @bo_va: bo_va to store the address
1980  * @saddr: where to map the BO
1981  * @offset: requested offset in the BO
1982  * @flags: attributes of pages (read/write/valid/etc.)
1983  *
1984  * Add a mapping of the BO at the specified addr into the VM. Replace existing
1985  * mappings as we do so.
1986  * Returns 0 for success, error for failure.
1987  *
1988  * Object has to be reserved and unreserved outside!
1989  */
1990 int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
1991                              struct amdgpu_bo_va *bo_va,
1992                              uint64_t saddr, uint64_t offset,
1993                              uint64_t size, uint64_t flags)
1994 {
1995         struct amdgpu_bo_va_mapping *mapping;
1996         struct amdgpu_bo *bo = bo_va->base.bo;
1997         uint64_t eaddr;
1998         int r;
1999
2000         /* validate the parameters */
2001         if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
2002             size == 0 || size & AMDGPU_GPU_PAGE_MASK)
2003                 return -EINVAL;
2004
2005         /* make sure object fit at this offset */
2006         eaddr = saddr + size - 1;
2007         if (saddr >= eaddr ||
2008             (bo && offset + size > amdgpu_bo_size(bo)))
2009                 return -EINVAL;
2010
2011         /* Allocate all the needed memory */
2012         mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
2013         if (!mapping)
2014                 return -ENOMEM;
2015
2016         r = amdgpu_vm_bo_clear_mappings(adev, bo_va->base.vm, saddr, size);
2017         if (r) {
2018                 kfree(mapping);
2019                 return r;
2020         }
2021
2022         saddr /= AMDGPU_GPU_PAGE_SIZE;
2023         eaddr /= AMDGPU_GPU_PAGE_SIZE;
2024
2025         mapping->start = saddr;
2026         mapping->last = eaddr;
2027         mapping->offset = offset;
2028         mapping->flags = flags;
2029
2030         amdgpu_vm_bo_insert_map(adev, bo_va, mapping);
2031
2032         return 0;
2033 }
2034
2035 /**
2036  * amdgpu_vm_bo_unmap - remove bo mapping from vm
2037  *
2038  * @adev: amdgpu_device pointer
2039  * @bo_va: bo_va to remove the address from
2040  * @saddr: where the BO is mapped
2041  *
2042  * Remove a mapping of the BO at the specified addr from the VM.
2043  * Returns 0 for success, error for failure.
2044  *
2045  * Object has to be reserved and unreserved outside!
2046  */
2047 int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
2048                        struct amdgpu_bo_va *bo_va,
2049                        uint64_t saddr)
2050 {
2051         struct amdgpu_bo_va_mapping *mapping;
2052         struct amdgpu_vm *vm = bo_va->base.vm;
2053         bool valid = true;
2054
2055         saddr /= AMDGPU_GPU_PAGE_SIZE;
2056
2057         list_for_each_entry(mapping, &bo_va->valids, list) {
2058                 if (mapping->start == saddr)
2059                         break;
2060         }
2061
2062         if (&mapping->list == &bo_va->valids) {
2063                 valid = false;
2064
2065                 list_for_each_entry(mapping, &bo_va->invalids, list) {
2066                         if (mapping->start == saddr)
2067                                 break;
2068                 }
2069
2070                 if (&mapping->list == &bo_va->invalids)
2071                         return -ENOENT;
2072         }
2073
2074         list_del(&mapping->list);
2075         amdgpu_vm_it_remove(mapping, &vm->va);
2076         mapping->bo_va = NULL;
2077         trace_amdgpu_vm_bo_unmap(bo_va, mapping);
2078
2079         if (valid)
2080                 list_add(&mapping->list, &vm->freed);
2081         else
2082                 amdgpu_vm_free_mapping(adev, vm, mapping,
2083                                        bo_va->last_pt_update);
2084
2085         return 0;
2086 }
2087
2088 /**
2089  * amdgpu_vm_bo_clear_mappings - remove all mappings in a specific range
2090  *
2091  * @adev: amdgpu_device pointer
2092  * @vm: VM structure to use
2093  * @saddr: start of the range
2094  * @size: size of the range
2095  *
2096  * Remove all mappings in a range, split them as appropriate.
2097  * Returns 0 for success, error for failure.
2098  */
2099 int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
2100                                 struct amdgpu_vm *vm,
2101                                 uint64_t saddr, uint64_t size)
2102 {
2103         struct amdgpu_bo_va_mapping *before, *after, *tmp, *next;
2104         LIST_HEAD(removed);
2105         uint64_t eaddr;
2106
2107         eaddr = saddr + size - 1;
2108         saddr /= AMDGPU_GPU_PAGE_SIZE;
2109         eaddr /= AMDGPU_GPU_PAGE_SIZE;
2110
2111         /* Allocate all the needed memory */
2112         before = kzalloc(sizeof(*before), GFP_KERNEL);
2113         if (!before)
2114                 return -ENOMEM;
2115         INIT_LIST_HEAD(&before->list);
2116
2117         after = kzalloc(sizeof(*after), GFP_KERNEL);
2118         if (!after) {
2119                 kfree(before);
2120                 return -ENOMEM;
2121         }
2122         INIT_LIST_HEAD(&after->list);
2123
2124         /* Now gather all removed mappings */
2125         tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
2126         while (tmp) {
2127                 /* Remember mapping split at the start */
2128                 if (tmp->start < saddr) {
2129                         before->start = tmp->start;
2130                         before->last = saddr - 1;
2131                         before->offset = tmp->offset;
2132                         before->flags = tmp->flags;
2133                         list_add(&before->list, &tmp->list);
2134                 }
2135
2136                 /* Remember mapping split at the end */
2137                 if (tmp->last > eaddr) {
2138                         after->start = eaddr + 1;
2139                         after->last = tmp->last;
2140                         after->offset = tmp->offset;
2141                         after->offset += after->start - tmp->start;
2142                         after->flags = tmp->flags;
2143                         list_add(&after->list, &tmp->list);
2144                 }
2145
2146                 list_del(&tmp->list);
2147                 list_add(&tmp->list, &removed);
2148
2149                 tmp = amdgpu_vm_it_iter_next(tmp, saddr, eaddr);
2150         }
2151
2152         /* And free them up */
2153         list_for_each_entry_safe(tmp, next, &removed, list) {
2154                 amdgpu_vm_it_remove(tmp, &vm->va);
2155                 list_del(&tmp->list);
2156
2157                 if (tmp->start < saddr)
2158                     tmp->start = saddr;
2159                 if (tmp->last > eaddr)
2160                     tmp->last = eaddr;
2161
2162                 tmp->bo_va = NULL;
2163                 list_add(&tmp->list, &vm->freed);
2164                 trace_amdgpu_vm_bo_unmap(NULL, tmp);
2165         }
2166
2167         /* Insert partial mapping before the range */
2168         if (!list_empty(&before->list)) {
2169                 amdgpu_vm_it_insert(before, &vm->va);
2170                 if (before->flags & AMDGPU_PTE_PRT)
2171                         amdgpu_vm_prt_get(adev);
2172         } else {
2173                 kfree(before);
2174         }
2175
2176         /* Insert partial mapping after the range */
2177         if (!list_empty(&after->list)) {
2178                 amdgpu_vm_it_insert(after, &vm->va);
2179                 if (after->flags & AMDGPU_PTE_PRT)
2180                         amdgpu_vm_prt_get(adev);
2181         } else {
2182                 kfree(after);
2183         }
2184
2185         return 0;
2186 }
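
/*
 * Editor's sketch: what punching a hole into an existing mapping does.
 * Assume one mapping covers GPU VA 0x100000..0x1fffff; clearing the 256KiB
 * in the middle leaves two partial mappings behind, 0x100000..0x13ffff and
 * 0x180000..0x1fffff, with their offsets adjusted so they still point at
 * the same BO contents.  Addresses are made up for illustration.
 */
static int example_punch_hole(struct amdgpu_device *adev,
                              struct amdgpu_vm *vm)
{
        return amdgpu_vm_bo_clear_mappings(adev, vm, 0x140000, 0x40000);
}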
2187
2188 /**
2189  * amdgpu_vm_bo_lookup_mapping - find mapping by address
2190  *
2191  * @vm: the requested VM
2192  *
2193  * Find a mapping by its address.
2194  */
2195 struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
2196                                                          uint64_t addr)
2197 {
2198         return amdgpu_vm_it_iter_first(&vm->va, addr, addr);
2199 }
2200
2201 /**
2202  * amdgpu_vm_bo_rmv - remove a bo from a specific vm
2203  *
2204  * @adev: amdgpu_device pointer
2205  * @bo_va: requested bo_va
2206  *
2207  * Remove @bo_va->bo from the requested vm.
2208  *
2209  * Object has to be reserved!
2210  */
2211 void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
2212                       struct amdgpu_bo_va *bo_va)
2213 {
2214         struct amdgpu_bo_va_mapping *mapping, *next;
2215         struct amdgpu_vm *vm = bo_va->base.vm;
2216
2217         list_del(&bo_va->base.bo_list);
2218
2219         spin_lock(&vm->status_lock);
2220         list_del(&bo_va->base.vm_status);
2221         spin_unlock(&vm->status_lock);
2222
2223         list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
2224                 list_del(&mapping->list);
2225                 amdgpu_vm_it_remove(mapping, &vm->va);
2226                 mapping->bo_va = NULL;
2227                 trace_amdgpu_vm_bo_unmap(bo_va, mapping);
2228                 list_add(&mapping->list, &vm->freed);
2229         }
2230         list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
2231                 list_del(&mapping->list);
2232                 amdgpu_vm_it_remove(mapping, &vm->va);
2233                 amdgpu_vm_free_mapping(adev, vm, mapping,
2234                                        bo_va->last_pt_update);
2235         }
2236
2237         dma_fence_put(bo_va->last_pt_update);
2238         kfree(bo_va);
2239 }
2240
2241 /**
2242  * amdgpu_vm_bo_invalidate - mark the bo as invalid
2243  *
2244  * @adev: amdgpu_device pointer
2245  * @bo: amdgpu buffer object
2246  * @evicted: whether the BO is being evicted
2247  *
2248  * Mark @bo as invalid.
2249  */
2250 void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
2251                              struct amdgpu_bo *bo, bool evicted)
2252 {
2253         struct amdgpu_vm_bo_base *bo_base;
2254
2255         list_for_each_entry(bo_base, &bo->va, bo_list) {
2256                 struct amdgpu_vm *vm = bo_base->vm;
2257
2258                 bo_base->moved = true;
2259                 if (evicted && bo->tbo.resv == vm->root.base.bo->tbo.resv) {
2260                         spin_lock(&bo_base->vm->status_lock);
2261                         if (bo->tbo.type == ttm_bo_type_kernel)
2262                                 list_move(&bo_base->vm_status, &vm->evicted);
2263                         else
2264                                 list_move_tail(&bo_base->vm_status,
2265                                                &vm->evicted);
2266                         spin_unlock(&bo_base->vm->status_lock);
2267                         continue;
2268                 }
2269
2270                 if (bo->tbo.type == ttm_bo_type_kernel) {
2271                         spin_lock(&bo_base->vm->status_lock);
2272                         if (list_empty(&bo_base->vm_status))
2273                                 list_add(&bo_base->vm_status, &vm->relocated);
2274                         spin_unlock(&bo_base->vm->status_lock);
2275                         continue;
2276                 }
2277
2278                 spin_lock(&bo_base->vm->status_lock);
2279                 if (list_empty(&bo_base->vm_status))
2280                         list_add(&bo_base->vm_status, &vm->moved);
2281                 spin_unlock(&bo_base->vm->status_lock);
2282         }
2283 }
2284
2285 static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size)
2286 {
2287         /* Total bits covered by PD + PTs */
2288         unsigned bits = ilog2(vm_size) + 18;
2289
2290         /* Make sure the PD is 4K in size up to an 8GB address space.
2291            Above that, split the bits equally between the PD and PTs */
2292         if (vm_size <= 8)
2293                 return (bits - 9);
2294         else
2295                 return ((bits + 3) / 2);
2296 }
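
/*
 * Editor's note: worked example for the helper above.  A 64 GB VM covers
 * 64 GB / 4 KB = 2^24 pages, so bits = ilog2(64) + 18 = 24.  Since 64 > 8
 * the bits are split roughly in half: block_size = (24 + 3) / 2 = 13, i.e.
 * each page table maps 2^13 pages and the directory covers the remaining
 * 2^11 entries.  For an 8 GB VM, bits = 21 and the 4K (512 entry) PD leaves
 * block_size = 21 - 9 = 12.
 */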
2297
2298 /**
2299  * amdgpu_vm_adjust_size - adjust vm size, block size and fragment size
2300  *
2301  * @adev: amdgpu_device pointer
2302  * @vm_size: the default vm size if it's set auto
2303  */
2304 void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t vm_size,
2305                            uint32_t fragment_size_default, unsigned max_level,
2306                            unsigned max_bits)
2307 {
2308         uint64_t tmp;
2309
2310         /* adjust vm size first */
2311         if (amdgpu_vm_size != -1) {
2312                 unsigned max_size = 1 << (max_bits - 30);
2313
2314                 vm_size = amdgpu_vm_size;
2315                 if (vm_size > max_size) {
2316                         dev_warn(adev->dev, "VM size (%d) too large, max is %u GB\n",
2317                                  amdgpu_vm_size, max_size);
2318                         vm_size = max_size;
2319                 }
2320         }
2321
2322         adev->vm_manager.max_pfn = (uint64_t)vm_size << 18;
2323
2324         tmp = roundup_pow_of_two(adev->vm_manager.max_pfn);
2325         if (amdgpu_vm_block_size != -1)
2326                 tmp >>= amdgpu_vm_block_size - 9;
2327         tmp = DIV_ROUND_UP(fls64(tmp) - 1, 9) - 1;
2328         adev->vm_manager.num_level = min(max_level, (unsigned)tmp);
2329         switch (adev->vm_manager.num_level) {
2330         case 3:
2331                 adev->vm_manager.root_level = AMDGPU_VM_PDB2;
2332                 break;
2333         case 2:
2334                 adev->vm_manager.root_level = AMDGPU_VM_PDB1;
2335                 break;
2336         case 1:
2337                 adev->vm_manager.root_level = AMDGPU_VM_PDB0;
2338                 break;
2339         default:
2340                 dev_err(adev->dev, "VMPT only supports 2~4+1 levels\n");
2341         }
2342         /* block size depends on vm size and hw setup*/
2343         if (amdgpu_vm_block_size != -1)
2344                 adev->vm_manager.block_size =
2345                         min((unsigned)amdgpu_vm_block_size, max_bits
2346                             - AMDGPU_GPU_PAGE_SHIFT
2347                             - 9 * adev->vm_manager.num_level);
2348         else if (adev->vm_manager.num_level > 1)
2349                 adev->vm_manager.block_size = 9;
2350         else
2351                 adev->vm_manager.block_size = amdgpu_vm_get_block_size(tmp);
2352
2353         if (amdgpu_vm_fragment_size == -1)
2354                 adev->vm_manager.fragment_size = fragment_size_default;
2355         else
2356                 adev->vm_manager.fragment_size = amdgpu_vm_fragment_size;
2357
2358         DRM_INFO("vm size is %u GB, %u levels, block size is %u-bit, fragment size is %u-bit\n",
2359                  vm_size, adev->vm_manager.num_level + 1,
2360                  adev->vm_manager.block_size,
2361                  adev->vm_manager.fragment_size);
2362 }
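
/*
 * Editor's note: worked example for the sizing above, assuming the module
 * parameters are left at -1.  With vm_size = 256 GB, max_pfn becomes
 * 256 << 18 = 2^26 pages.  fls64(2^26) - 1 = 26, so tmp ends up as
 * DIV_ROUND_UP(26, 9) - 1 = 2 and (with max_level >= 2) the VM uses
 * num_level + 1 = 3 levels with a block size of 9 bits, i.e. 512-entry
 * page tables.
 */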
2363
2364 /**
2365  * amdgpu_vm_init - initialize a vm instance
2366  *
2367  * @adev: amdgpu_device pointer
2368  * @vm: requested vm
2369  * @vm_context: Indicates whether it is a GFX or Compute context
2370  *
2371  * Init @vm fields.
2372  */
2373 int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
2374                    int vm_context, unsigned int pasid)
2375 {
2376         struct amdgpu_bo_param bp;
2377         const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE,
2378                 AMDGPU_VM_PTE_COUNT(adev) * 8);
2379         unsigned ring_instance;
2380         struct amdgpu_ring *ring;
2381         struct drm_sched_rq *rq;
2382         unsigned long size;
2383         uint64_t flags;
2384         int r, i;
2385
2386         vm->va = RB_ROOT_CACHED;
2387         for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
2388                 vm->reserved_vmid[i] = NULL;
2389         spin_lock_init(&vm->status_lock);
2390         INIT_LIST_HEAD(&vm->evicted);
2391         INIT_LIST_HEAD(&vm->relocated);
2392         INIT_LIST_HEAD(&vm->moved);
2393         INIT_LIST_HEAD(&vm->freed);
2394
2395         /* create scheduler entity for page table updates */
2396
2397         ring_instance = atomic_inc_return(&adev->vm_manager.vm_pte_next_ring);
2398         ring_instance %= adev->vm_manager.vm_pte_num_rings;
2399         ring = adev->vm_manager.vm_pte_rings[ring_instance];
2400         rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
2401         r = drm_sched_entity_init(&ring->sched, &vm->entity,
2402                                   rq, amdgpu_sched_jobs, NULL);
2403         if (r)
2404                 return r;
2405
2406         vm->pte_support_ats = false;
2407
2408         if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE) {
2409                 vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
2410                                                 AMDGPU_VM_USE_CPU_FOR_COMPUTE);
2411
2412                 if (adev->asic_type == CHIP_RAVEN)
2413                         vm->pte_support_ats = true;
2414         } else {
2415                 vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
2416                                                 AMDGPU_VM_USE_CPU_FOR_GFX);
2417         }
2418         DRM_DEBUG_DRIVER("VM update mode is %s\n",
2419                          vm->use_cpu_for_update ? "CPU" : "SDMA");
2420         WARN_ONCE((vm->use_cpu_for_update & !amdgpu_vm_is_large_bar(adev)),
2421                   "CPU update of VM recommended only for large BAR system\n");
2422         vm->last_update = NULL;
2423
2424         flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
2425         if (vm->use_cpu_for_update)
2426                 flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
2427         else
2428                 flags |= AMDGPU_GEM_CREATE_SHADOW;
2429
2430         size = amdgpu_vm_bo_size(adev, adev->vm_manager.root_level);
2431         memset(&bp, 0, sizeof(bp));
2432         bp.size = size;
2433         bp.byte_align = align;
2434         bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
2435         bp.flags = flags;
2436         bp.type = ttm_bo_type_kernel;
2437         bp.resv = NULL;
2438         r = amdgpu_bo_create(adev, &bp, &vm->root.base.bo);
2439         if (r)
2440                 goto error_free_sched_entity;
2441
2442         r = amdgpu_bo_reserve(vm->root.base.bo, true);
2443         if (r)
2444                 goto error_free_root;
2445
2446         r = amdgpu_vm_clear_bo(adev, vm, vm->root.base.bo,
2447                                adev->vm_manager.root_level,
2448                                vm->pte_support_ats);
2449         if (r)
2450                 goto error_unreserve;
2451
2452         vm->root.base.vm = vm;
2453         list_add_tail(&vm->root.base.bo_list, &vm->root.base.bo->va);
2454         list_add_tail(&vm->root.base.vm_status, &vm->evicted);
2455         amdgpu_bo_unreserve(vm->root.base.bo);
2456
2457         if (pasid) {
2458                 unsigned long flags;
2459
2460                 spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
2461                 r = idr_alloc(&adev->vm_manager.pasid_idr, vm, pasid, pasid + 1,
2462                               GFP_ATOMIC);
2463                 spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
2464                 if (r < 0)
2465                         goto error_free_root;
2466
2467                 vm->pasid = pasid;
2468         }
2469
2470         INIT_KFIFO(vm->faults);
2471         vm->fault_credit = 16;
2472
2473         return 0;
2474
2475 error_unreserve:
2476         amdgpu_bo_unreserve(vm->root.base.bo);
2477
2478 error_free_root:
2479         amdgpu_bo_unref(&vm->root.base.bo->shadow);
2480         amdgpu_bo_unref(&vm->root.base.bo);
2481         vm->root.base.bo = NULL;
2482
2483 error_free_sched_entity:
2484         drm_sched_entity_fini(&ring->sched, &vm->entity);
2485
2486         return r;
2487 }
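
/*
 * Editor's sketch: creating and tearing down a GFX VM without a PASID,
 * roughly what the per-file-descriptor setup does (which additionally
 * allocates a PASID first).  Illustrative only.
 */
static int example_create_vm(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
        int r;

        r = amdgpu_vm_init(adev, vm, AMDGPU_VM_CONTEXT_GFX, 0);
        if (r)
                return r;

        /* ... map BOs, submit work ... */

        amdgpu_vm_fini(adev, vm);
        return 0;
}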
2488
2489 /**
2490  * amdgpu_vm_make_compute - Turn a GFX VM into a compute VM
2491  *
2492  * This only works on GFX VMs that don't have any BOs added and no
2493  * page tables allocated yet.
2494  *
2495  * Changes the following VM parameters:
2496  * - use_cpu_for_update
2497  * - pte_supports_ats
2498  * - pasid (old PASID is released, because compute manages its own PASIDs)
2499  *
2500  * Reinitializes the page directory to reflect the changed ATS
2501  * setting. May leave behind an unused shadow BO for the page
2502  * directory when switching from SDMA updates to CPU updates.
2503  *
2504  * Returns 0 for success, -errno for errors.
2505  */
2506 int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
2507 {
2508         bool pte_support_ats = (adev->asic_type == CHIP_RAVEN);
2509         int r;
2510
2511         r = amdgpu_bo_reserve(vm->root.base.bo, true);
2512         if (r)
2513                 return r;
2514
2515         /* Sanity checks */
2516         if (!RB_EMPTY_ROOT(&vm->va.rb_root) || vm->root.entries) {
2517                 r = -EINVAL;
2518                 goto error;
2519         }
2520
2521         /* Check if PD needs to be reinitialized and do it before
2522          * changing any other state, in case it fails.
2523          */
2524         if (pte_support_ats != vm->pte_support_ats) {
2525                 r = amdgpu_vm_clear_bo(adev, vm, vm->root.base.bo,
2526                                adev->vm_manager.root_level,
2527                                pte_support_ats);
2528                 if (r)
2529                         goto error;
2530         }
2531
2532         /* Update VM state */
2533         vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
2534                                     AMDGPU_VM_USE_CPU_FOR_COMPUTE);
2535         vm->pte_support_ats = pte_support_ats;
2536         DRM_DEBUG_DRIVER("VM update mode is %s\n",
2537                          vm->use_cpu_for_update ? "CPU" : "SDMA");
2538         WARN_ONCE((vm->use_cpu_for_update & !amdgpu_vm_is_large_bar(adev)),
2539                   "CPU update of VM recommended only for large BAR system\n");
2540
2541         if (vm->pasid) {
2542                 unsigned long flags;
2543
2544                 spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
2545                 idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
2546                 spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
2547
2548                 vm->pasid = 0;
2549         }
2550
2551 error:
2552         amdgpu_bo_unreserve(vm->root.base.bo);
2553         return r;
2554 }
2555
2556 /**
2557  * amdgpu_vm_free_levels - free PD/PT levels
2558  *
2559  * @adev: amdgpu device structure
2560  * @parent: PD/PT starting level to free
2561  * @level: level of parent structure
2562  *
2563  * Free the page directory or page table level and all sub levels.
2564  */
2565 static void amdgpu_vm_free_levels(struct amdgpu_device *adev,
2566                                   struct amdgpu_vm_pt *parent,
2567                                   unsigned level)
2568 {
2569         unsigned i, num_entries = amdgpu_vm_num_entries(adev, level);
2570
2571         if (parent->base.bo) {
2572                 list_del(&parent->base.bo_list);
2573                 list_del(&parent->base.vm_status);
2574                 amdgpu_bo_unref(&parent->base.bo->shadow);
2575                 amdgpu_bo_unref(&parent->base.bo);
2576         }
2577
2578         if (parent->entries)
2579                 for (i = 0; i < num_entries; i++)
2580                         amdgpu_vm_free_levels(adev, &parent->entries[i],
2581                                               level + 1);
2582
2583         kvfree(parent->entries);
2584 }
2585
2586 /**
2587  * amdgpu_vm_fini - tear down a vm instance
2588  *
2589  * @adev: amdgpu_device pointer
2590  * @vm: requested vm
2591  *
2592  * Tear down @vm.
2593  * Unbind the VM and remove all bos from the vm bo list
2594  */
2595 void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
2596 {
2597         struct amdgpu_bo_va_mapping *mapping, *tmp;
2598         bool prt_fini_needed = !!adev->gmc.gmc_funcs->set_prt;
2599         struct amdgpu_bo *root;
2600         u64 fault;
2601         int i, r;
2602
2603         amdgpu_amdkfd_gpuvm_destroy_cb(adev, vm);
2604
2605         /* Clear pending page faults from IH when the VM is destroyed */
2606         while (kfifo_get(&vm->faults, &fault))
2607                 amdgpu_ih_clear_fault(adev, fault);
2608
2609         if (vm->pasid) {
2610                 unsigned long flags;
2611
2612                 spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
2613                 idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
2614                 spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
2615         }
2616
2617         drm_sched_entity_fini(vm->entity.sched, &vm->entity);
2618
2619         if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
2620                 dev_err(adev->dev, "still active bo inside vm\n");
2621         }
2622         rbtree_postorder_for_each_entry_safe(mapping, tmp,
2623                                              &vm->va.rb_root, rb) {
2624                 list_del(&mapping->list);
2625                 amdgpu_vm_it_remove(mapping, &vm->va);
2626                 kfree(mapping);
2627         }
2628         list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
2629                 if (mapping->flags & AMDGPU_PTE_PRT && prt_fini_needed) {
2630                         amdgpu_vm_prt_fini(adev, vm);
2631                         prt_fini_needed = false;
2632                 }
2633
2634                 list_del(&mapping->list);
2635                 amdgpu_vm_free_mapping(adev, vm, mapping, NULL);
2636         }
2637
2638         root = amdgpu_bo_ref(vm->root.base.bo);
2639         r = amdgpu_bo_reserve(root, true);
2640         if (r) {
2641                 dev_err(adev->dev, "Leaking page tables because BO reservation failed\n");
2642         } else {
2643                 amdgpu_vm_free_levels(adev, &vm->root,
2644                                       adev->vm_manager.root_level);
2645                 amdgpu_bo_unreserve(root);
2646         }
2647         amdgpu_bo_unref(&root);
2648         dma_fence_put(vm->last_update);
2649         for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
2650                 amdgpu_vmid_free_reserved(adev, vm, i);
2651 }
2652
2653 /**
2654  * amdgpu_vm_pasid_fault_credit - Check fault credit for given PASID
2655  *
2656  * @adev: amdgpu_device pointer
2657  * @pasid: PASID to identify the VM
2658  *
2659  * This function is expected to be called in interrupt context. Returns
2660  * true if there was fault credit, false otherwise
2661  */
2662 bool amdgpu_vm_pasid_fault_credit(struct amdgpu_device *adev,
2663                                   unsigned int pasid)
2664 {
2665         struct amdgpu_vm *vm;
2666
2667         spin_lock(&adev->vm_manager.pasid_lock);
2668         vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
2669         if (!vm) {
2670                 /* VM not found, can't track fault credit */
2671                 spin_unlock(&adev->vm_manager.pasid_lock);
2672                 return true;
2673         }
2674
2675         /* No lock needed. Only accessed by the IRQ handler */
2676         if (!vm->fault_credit) {
2677                 /* Too many faults in this VM */
2678                 spin_unlock(&adev->vm_manager.pasid_lock);
2679                 return false;
2680         }
2681
2682         vm->fault_credit--;
2683         spin_unlock(&adev->vm_manager.pasid_lock);
2684         return true;
2685 }
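
/*
 * Editor's sketch: how an interrupt-handler prescreen step might consult the
 * fault credit, loosely modeled on the Vega IH path.  If no credit is left
 * the fault is dropped instead of being processed again and again.
 * Illustrative only.
 */
static bool example_prescreen_fault(struct amdgpu_device *adev,
                                    unsigned int pasid)
{
        if (amdgpu_vm_pasid_fault_credit(adev, pasid))
                return true;    /* process this fault */

        return false;           /* credit exhausted, filter it out */
}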
2686
2687 /**
2688  * amdgpu_vm_manager_init - init the VM manager
2689  *
2690  * @adev: amdgpu_device pointer
2691  *
2692  * Initialize the VM manager structures
2693  */
2694 void amdgpu_vm_manager_init(struct amdgpu_device *adev)
2695 {
2696         unsigned i;
2697
2698         amdgpu_vmid_mgr_init(adev);
2699
2700         adev->vm_manager.fence_context =
2701                 dma_fence_context_alloc(AMDGPU_MAX_RINGS);
2702         for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
2703                 adev->vm_manager.seqno[i] = 0;
2704
2705         atomic_set(&adev->vm_manager.vm_pte_next_ring, 0);
2706         spin_lock_init(&adev->vm_manager.prt_lock);
2707         atomic_set(&adev->vm_manager.num_prt_users, 0);
2708
2709         /* If not overridden by the user, compute VM tables are updated by
2710          * the CPU only on large BAR systems by default.
2711          */
2712 #ifdef CONFIG_X86_64
2713         if (amdgpu_vm_update_mode == -1) {
2714                 if (amdgpu_vm_is_large_bar(adev))
2715                         adev->vm_manager.vm_update_mode =
2716                                 AMDGPU_VM_USE_CPU_FOR_COMPUTE;
2717                 else
2718                         adev->vm_manager.vm_update_mode = 0;
2719         } else
2720                 adev->vm_manager.vm_update_mode = amdgpu_vm_update_mode;
2721 #else
2722         adev->vm_manager.vm_update_mode = 0;
2723 #endif
2724
2725         idr_init(&adev->vm_manager.pasid_idr);
2726         spin_lock_init(&adev->vm_manager.pasid_lock);
2727 }
2728
2729 /**
2730  * amdgpu_vm_manager_fini - cleanup VM manager
2731  *
2732  * @adev: amdgpu_device pointer
2733  *
2734  * Cleanup the VM manager and free resources.
2735  */
2736 void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
2737 {
2738         WARN_ON(!idr_is_empty(&adev->vm_manager.pasid_idr));
2739         idr_destroy(&adev->vm_manager.pasid_idr);
2740
2741         amdgpu_vmid_mgr_fini(adev);
2742 }
2743
2744 int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
2745 {
2746         union drm_amdgpu_vm *args = data;
2747         struct amdgpu_device *adev = dev->dev_private;
2748         struct amdgpu_fpriv *fpriv = filp->driver_priv;
2749         int r;
2750
2751         switch (args->in.op) {
2752         case AMDGPU_VM_OP_RESERVE_VMID:
2753                 /* Currently we only need to reserve a VMID from the gfxhub */
2754                 r = amdgpu_vmid_alloc_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB);
2755                 if (r)
2756                         return r;
2757                 break;
2758         case AMDGPU_VM_OP_UNRESERVE_VMID:
2759                 amdgpu_vmid_free_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB);
2760                 break;
2761         default:
2762                 return -EINVAL;
2763         }
2764
2765         return 0;
2766 }
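
/*
 * Editor's sketch of the userspace side: reserving a VMID through the ioctl
 * above via libdrm's generic command helper.  This snippet would live in a
 * userspace tool, not in this file; the header names and op value come from
 * amdgpu_drm.h and are assumed to be available in the build environment.
 *
 *	#include <xf86drm.h>
 *	#include <drm/amdgpu_drm.h>
 *
 *	static int example_reserve_vmid(int fd)
 *	{
 *		union drm_amdgpu_vm args = {};
 *
 *		args.in.op = AMDGPU_VM_OP_RESERVE_VMID;
 *		return drmCommandWriteRead(fd, DRM_AMDGPU_VM,
 *					   &args, sizeof(args));
 *	}
 */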