1 /*
2  * Copyright 2008 Advanced Micro Devices, Inc.
3  * Copyright 2008 Red Hat Inc.
4  * Copyright 2009 Jerome Glisse.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors: Dave Airlie
25  *          Alex Deucher
26  *          Jerome Glisse
27  */
28 #include <linux/dma-fence-array.h>
29 #include <linux/interval_tree_generic.h>
30 #include <linux/idr.h>
31 #include <drm/drmP.h>
32 #include <drm/amdgpu_drm.h>
33 #include "amdgpu.h"
34 #include "amdgpu_trace.h"
35 #include "amdgpu_amdkfd.h"
36
37 /*
38  * GPUVM
39  * GPUVM is similar to the legacy gart on older asics, however
40  * rather than there being a single global gart table
41  * for the entire GPU, there are multiple VM page tables active
42  * at any given time.  The VM page tables can contain a mix of
43  * VRAM pages and system memory pages, and system memory pages
44  * can be mapped as snooped (cached system pages) or unsnooped
45  * (uncached system pages).
46  * Each VM has an ID associated with it and there is a page table
47  * associated with each VMID.  When executing a command buffer,
48  * the kernel tells the ring what VMID to use for that command
49  * buffer.  VMIDs are allocated dynamically as commands are submitted.
50  * The userspace drivers maintain their own address space and the kernel
51  * sets up their page tables accordingly when they submit their
52  * command buffers and a VMID is assigned.
53  * Cayman/Trinity support up to 8 active VMs at any given time;
54  * SI supports 16.
55  */
56
57 #define START(node) ((node)->start)
58 #define LAST(node) ((node)->last)
59
60 INTERVAL_TREE_DEFINE(struct amdgpu_bo_va_mapping, rb, uint64_t, __subtree_last,
61                      START, LAST, static, amdgpu_vm_it)
62
63 #undef START
64 #undef LAST
65
66 /* Local structure. Encapsulate some VM table update parameters to reduce
67  * the number of function parameters
68  */
69 struct amdgpu_pte_update_params {
70         /* amdgpu device we do this update for */
71         struct amdgpu_device *adev;
72         /* optional amdgpu_vm we do this update for */
73         struct amdgpu_vm *vm;
75         /* address to copy the page table entries from */
75         uint64_t src;
76         /* indirect buffer to fill with commands */
77         struct amdgpu_ib *ib;
78         /* Function which actually does the update */
79         void (*func)(struct amdgpu_pte_update_params *params,
80                      struct amdgpu_bo *bo, uint64_t pe,
81                      uint64_t addr, unsigned count, uint32_t incr,
82                      uint64_t flags);
83         /* The next two are used during VM update by CPU
84          *  DMA addresses to use for mapping
85          *  Kernel pointer of PD/PT BO that needs to be updated
86          */
87         dma_addr_t *pages_addr;
88         void *kptr;
89 };
90
91 /* Helper to disable partial resident texture feature from a fence callback */
92 struct amdgpu_prt_cb {
93         struct amdgpu_device *adev;
94         struct dma_fence_cb cb;
95 };
96
97 /**
98  * amdgpu_vm_level_shift - return the addr shift for each level
99  *
100  * @adev: amdgpu_device pointer
101  *
102  * Returns the number of bits the pfn needs to be right shifted for a level.
103  */
104 static unsigned amdgpu_vm_level_shift(struct amdgpu_device *adev,
105                                       unsigned level)
106 {
107         unsigned shift = 0xff;
108
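            /* A PTB indexes pages directly (shift 0); each PDB0 entry covers
             * (1 << block_size) pages and every directory level above that
             * adds another 9 bits (512 entries per directory).
             */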
109         switch (level) {
110         case AMDGPU_VM_PDB2:
111         case AMDGPU_VM_PDB1:
112         case AMDGPU_VM_PDB0:
113                 shift = 9 * (AMDGPU_VM_PDB0 - level) +
114                         adev->vm_manager.block_size;
115                 break;
116         case AMDGPU_VM_PTB:
117                 shift = 0;
118                 break;
119         default:
120                 dev_err(adev->dev, "the level%d isn't supported.\n", level);
121         }
122
123         return shift;
124 }
125
126 /**
127  * amdgpu_vm_num_entries - return the number of entries in a PD/PT
128  *
129  * @adev: amdgpu_device pointer
130  *
131  * Calculate the number of entries in a page directory or page table.
132  */
133 static unsigned amdgpu_vm_num_entries(struct amdgpu_device *adev,
134                                       unsigned level)
135 {
136         unsigned shift = amdgpu_vm_level_shift(adev,
137                                                adev->vm_manager.root_level);
138
139         if (level == adev->vm_manager.root_level)
140                 /* For the root directory */
141                 return round_up(adev->vm_manager.max_pfn, 1 << shift) >> shift;
142         else if (level != AMDGPU_VM_PTB)
143                 /* Everything in between */
144                 return 512;
145         else
146                 /* For the page tables on the leaves */
147                 return AMDGPU_VM_PTE_COUNT(adev);
148 }
149
150 /**
151  * amdgpu_vm_bo_size - returns the size of the BOs in bytes
152  *
153  * @adev: amdgpu_device pointer
154  *
155  * Calculate the size of the BO for a page directory or page table in bytes.
156  */
157 static unsigned amdgpu_vm_bo_size(struct amdgpu_device *adev, unsigned level)
158 {
159         return AMDGPU_GPU_PAGE_ALIGN(amdgpu_vm_num_entries(adev, level) * 8);
160 }
161
162 /**
163  * amdgpu_vm_get_pd_bo - add the VM PD to a validation list
164  *
165  * @vm: vm providing the BOs
166  * @validated: head of validation list
167  * @entry: entry to add
168  *
169  * Add the page directory to the list of BOs to
170  * validate for command submission.
171  */
172 void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
173                          struct list_head *validated,
174                          struct amdgpu_bo_list_entry *entry)
175 {
176         entry->robj = vm->root.base.bo;
177         entry->priority = 0;
178         entry->tv.bo = &entry->robj->tbo;
179         entry->tv.shared = true;
180         entry->user_pages = NULL;
181         list_add(&entry->tv.head, validated);
182 }
183
184 /**
185  * amdgpu_vm_validate_pt_bos - validate the page table BOs
186  *
187  * @adev: amdgpu device pointer
188  * @vm: vm providing the BOs
189  * @validate: callback to do the validation
190  * @param: parameter for the validation callback
191  *
192  * Validate the page table BOs on command submission if necessary.
193  */
194 int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
195                               int (*validate)(void *p, struct amdgpu_bo *bo),
196                               void *param)
197 {
198         struct ttm_bo_global *glob = adev->mman.bdev.glob;
199         int r;
200
201         spin_lock(&vm->status_lock);
202         while (!list_empty(&vm->evicted)) {
203                 struct amdgpu_vm_bo_base *bo_base;
204                 struct amdgpu_bo *bo;
205
206                 bo_base = list_first_entry(&vm->evicted,
207                                            struct amdgpu_vm_bo_base,
208                                            vm_status);
209                 spin_unlock(&vm->status_lock);
210
211                 bo = bo_base->bo;
212                 BUG_ON(!bo);
213                 if (bo->parent) {
214                         r = validate(param, bo);
215                         if (r)
216                                 return r;
217
218                         spin_lock(&glob->lru_lock);
219                         ttm_bo_move_to_lru_tail(&bo->tbo);
220                         if (bo->shadow)
221                                 ttm_bo_move_to_lru_tail(&bo->shadow->tbo);
222                         spin_unlock(&glob->lru_lock);
223                 }
224
225                 if (bo->tbo.type == ttm_bo_type_kernel &&
226                     vm->use_cpu_for_update) {
227                         r = amdgpu_bo_kmap(bo, NULL);
228                         if (r)
229                                 return r;
230                 }
231
232                 spin_lock(&vm->status_lock);
233                 if (bo->tbo.type != ttm_bo_type_kernel)
234                         list_move(&bo_base->vm_status, &vm->moved);
235                 else
236                         list_move(&bo_base->vm_status, &vm->relocated);
237         }
238         spin_unlock(&vm->status_lock);
239
240         return 0;
241 }
242
243 /**
244  * amdgpu_vm_ready - check VM is ready for updates
245  *
246  * @vm: VM to check
247  *
248  * Check if all VM PDs/PTs are ready for updates
249  */
250 bool amdgpu_vm_ready(struct amdgpu_vm *vm)
251 {
252         bool ready;
253
254         spin_lock(&vm->status_lock);
255         ready = list_empty(&vm->evicted);
256         spin_unlock(&vm->status_lock);
257
258         return ready;
259 }
260
261 /**
262  * amdgpu_vm_clear_bo - initially clear the PDs/PTs
263  *
264  * @adev: amdgpu_device pointer
265  * @bo: BO to clear
266  * @level: level this BO is at
267  *
268  * Root PD needs to be reserved when calling this.
269  */
270 static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
271                               struct amdgpu_vm *vm, struct amdgpu_bo *bo,
272                               unsigned level, bool pte_support_ats)
273 {
274         struct ttm_operation_ctx ctx = { true, false };
275         struct dma_fence *fence = NULL;
276         unsigned entries, ats_entries;
277         struct amdgpu_ring *ring;
278         struct amdgpu_job *job;
279         uint64_t addr;
280         int r;
281
282         addr = amdgpu_bo_gpu_offset(bo);
283         entries = amdgpu_bo_size(bo) / 8;
284
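            /* With ATS the part of the address space below the VA hole is
             * initialized with the default ATC value instead of being
             * cleared to zero; compute how many entries of this BO fall
             * into that range.
             */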
285         if (pte_support_ats) {
286                 if (level == adev->vm_manager.root_level) {
287                         ats_entries = amdgpu_vm_level_shift(adev, level);
288                         ats_entries += AMDGPU_GPU_PAGE_SHIFT;
289                         ats_entries = AMDGPU_VA_HOLE_START >> ats_entries;
290                         ats_entries = min(ats_entries, entries);
291                         entries -= ats_entries;
292                 } else {
293                         ats_entries = entries;
294                         entries = 0;
295                 }
296         } else {
297                 ats_entries = 0;
298         }
299
300         ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
301
302         r = reservation_object_reserve_shared(bo->tbo.resv);
303         if (r)
304                 return r;
305
306         r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
307         if (r)
308                 goto error;
309
310         r = amdgpu_job_alloc_with_ib(adev, 64, &job);
311         if (r)
312                 goto error;
313
314         if (ats_entries) {
315                 uint64_t ats_value;
316
317                 ats_value = AMDGPU_PTE_DEFAULT_ATC;
318                 if (level != AMDGPU_VM_PTB)
319                         ats_value |= AMDGPU_PDE_PTE;
320
321                 amdgpu_vm_set_pte_pde(adev, &job->ibs[0], addr, 0,
322                                       ats_entries, 0, ats_value);
323                 addr += ats_entries * 8;
324         }
325
326         if (entries)
327                 amdgpu_vm_set_pte_pde(adev, &job->ibs[0], addr, 0,
328                                       entries, 0, 0);
329
330         amdgpu_ring_pad_ib(ring, &job->ibs[0]);
331
332         WARN_ON(job->ibs[0].length_dw > 64);
333         r = amdgpu_sync_resv(adev, &job->sync, bo->tbo.resv,
334                              AMDGPU_FENCE_OWNER_UNDEFINED, false);
335         if (r)
336                 goto error_free;
337
338         r = amdgpu_job_submit(job, ring, &vm->entity,
339                               AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
340         if (r)
341                 goto error_free;
342
343         amdgpu_bo_fence(bo, fence, true);
344         dma_fence_put(fence);
345
346         if (bo->shadow)
347                 return amdgpu_vm_clear_bo(adev, vm, bo->shadow,
348                                           level, pte_support_ats);
349
350         return 0;
351
352 error_free:
353         amdgpu_job_free(job);
354
355 error:
356         return r;
357 }
358
359 /**
360  * amdgpu_vm_alloc_levels - allocate the PD/PT levels
361  *
362  * @adev: amdgpu_device pointer
363  * @vm: requested vm
364  * @saddr: start of the address range
365  * @eaddr: end of the address range
366  *
367  * Make sure the page directories and page tables are allocated
368  */
369 static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
370                                   struct amdgpu_vm *vm,
371                                   struct amdgpu_vm_pt *parent,
372                                   uint64_t saddr, uint64_t eaddr,
373                                   unsigned level, bool ats)
374 {
375         unsigned shift = amdgpu_vm_level_shift(adev, level);
376         unsigned pt_idx, from, to;
377         u64 flags;
378         int r;
379
380         if (!parent->entries) {
381                 unsigned num_entries = amdgpu_vm_num_entries(adev, level);
382
383                 parent->entries = kvmalloc_array(num_entries,
384                                                    sizeof(struct amdgpu_vm_pt),
385                                                    GFP_KERNEL | __GFP_ZERO);
386                 if (!parent->entries)
387                         return -ENOMEM;
388                 memset(parent->entries, 0, sizeof(struct amdgpu_vm_pt));
389         }
390
391         from = saddr >> shift;
392         to = eaddr >> shift;
393         if (from >= amdgpu_vm_num_entries(adev, level) ||
394             to >= amdgpu_vm_num_entries(adev, level))
395                 return -EINVAL;
396
397         ++level;
398         saddr = saddr & ((1 << shift) - 1);
399         eaddr = eaddr & ((1 << shift) - 1);
400
401         flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
402         if (vm->use_cpu_for_update)
403                 flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
404         else
405                 flags |= (AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
406                                 AMDGPU_GEM_CREATE_SHADOW);
407
408         /* walk over the address space and allocate the page tables */
409         for (pt_idx = from; pt_idx <= to; ++pt_idx) {
410                 struct reservation_object *resv = vm->root.base.bo->tbo.resv;
411                 struct amdgpu_vm_pt *entry = &parent->entries[pt_idx];
412                 struct amdgpu_bo *pt;
413
414                 if (!entry->base.bo) {
415                         struct amdgpu_bo_param bp;
416
417                         memset(&bp, 0, sizeof(bp));
418                         bp.size = amdgpu_vm_bo_size(adev, level);
419                         bp.byte_align = AMDGPU_GPU_PAGE_SIZE;
420                         bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
421                         bp.flags = flags;
422                         bp.type = ttm_bo_type_kernel;
423                         bp.resv = resv;
424                         r = amdgpu_bo_create(adev, &bp, &pt);
425                         if (r)
426                                 return r;
427
428                         r = amdgpu_vm_clear_bo(adev, vm, pt, level, ats);
429                         if (r) {
430                                 amdgpu_bo_unref(&pt->shadow);
431                                 amdgpu_bo_unref(&pt);
432                                 return r;
433                         }
434
435                         if (vm->use_cpu_for_update) {
436                                 r = amdgpu_bo_kmap(pt, NULL);
437                                 if (r) {
438                                         amdgpu_bo_unref(&pt->shadow);
439                                         amdgpu_bo_unref(&pt);
440                                         return r;
441                                 }
442                         }
443
444                         /* Keep a reference to the root directory to avoid
445                          * freeing them up in the wrong order.
446                          */
447                         pt->parent = amdgpu_bo_ref(parent->base.bo);
448
449                         entry->base.vm = vm;
450                         entry->base.bo = pt;
451                         list_add_tail(&entry->base.bo_list, &pt->va);
452                         spin_lock(&vm->status_lock);
453                         list_add(&entry->base.vm_status, &vm->relocated);
454                         spin_unlock(&vm->status_lock);
455                 }
456
457                 if (level < AMDGPU_VM_PTB) {
458                         uint64_t sub_saddr = (pt_idx == from) ? saddr : 0;
459                         uint64_t sub_eaddr = (pt_idx == to) ? eaddr :
460                                 ((1 << shift) - 1);
461                         r = amdgpu_vm_alloc_levels(adev, vm, entry, sub_saddr,
462                                                    sub_eaddr, level, ats);
463                         if (r)
464                                 return r;
465                 }
466         }
467
468         return 0;
469 }
470
471 /**
472  * amdgpu_vm_alloc_pts - Allocate page tables.
473  *
474  * @adev: amdgpu_device pointer
475  * @vm: VM to allocate page tables for
476  * @saddr: Start address which needs to be allocated
477  * @size: Size from start address we need.
478  *
479  * Make sure the page tables are allocated.
480  */
481 int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
482                         struct amdgpu_vm *vm,
483                         uint64_t saddr, uint64_t size)
484 {
485         uint64_t eaddr;
486         bool ats = false;
487
488         /* validate the parameters */
489         if (saddr & AMDGPU_GPU_PAGE_MASK || size & AMDGPU_GPU_PAGE_MASK)
490                 return -EINVAL;
491
492         eaddr = saddr + size - 1;
493
494         if (vm->pte_support_ats)
495                 ats = saddr < AMDGPU_VA_HOLE_START;
496
497         saddr /= AMDGPU_GPU_PAGE_SIZE;
498         eaddr /= AMDGPU_GPU_PAGE_SIZE;
499
500         if (eaddr >= adev->vm_manager.max_pfn) {
501                 dev_err(adev->dev, "va above limit (0x%08llX >= 0x%08llX)\n",
502                         eaddr, adev->vm_manager.max_pfn);
503                 return -EINVAL;
504         }
505
506         return amdgpu_vm_alloc_levels(adev, vm, &vm->root, saddr, eaddr,
507                                       adev->vm_manager.root_level, ats);
508 }
509
510 /**
511  * amdgpu_vm_check_compute_bug - check whether asic has compute vm bug
512  *
513  * @adev: amdgpu_device pointer
514  */
515 void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev)
516 {
517         const struct amdgpu_ip_block *ip_block;
518         bool has_compute_vm_bug;
519         struct amdgpu_ring *ring;
520         int i;
521
522         has_compute_vm_bug = false;
523
524         ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
525         if (ip_block) {
526                 /* Compute has a VM bug for GFX version < 7.
527                  * Compute has a VM bug for GFX 8 MEC firmware version < 673. */
528                 if (ip_block->version->major <= 7)
529                         has_compute_vm_bug = true;
530                 else if (ip_block->version->major == 8)
531                         if (adev->gfx.mec_fw_version < 673)
532                                 has_compute_vm_bug = true;
533         }
534
535         for (i = 0; i < adev->num_rings; i++) {
536                 ring = adev->rings[i];
537                 if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
538                         /* only compute rings */
539                         ring->has_compute_vm_bug = has_compute_vm_bug;
540                 else
541                         ring->has_compute_vm_bug = false;
542         }
543 }
544
545 bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
546                                   struct amdgpu_job *job)
547 {
548         struct amdgpu_device *adev = ring->adev;
549         unsigned vmhub = ring->funcs->vmhub;
550         struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
551         struct amdgpu_vmid *id;
552         bool gds_switch_needed;
553         bool vm_flush_needed = job->vm_needs_flush || ring->has_compute_vm_bug;
554
555         if (job->vmid == 0)
556                 return false;
557         id = &id_mgr->ids[job->vmid];
558         gds_switch_needed = ring->funcs->emit_gds_switch && (
559                 id->gds_base != job->gds_base ||
560                 id->gds_size != job->gds_size ||
561                 id->gws_base != job->gws_base ||
562                 id->gws_size != job->gws_size ||
563                 id->oa_base != job->oa_base ||
564                 id->oa_size != job->oa_size);
565
566         if (amdgpu_vmid_had_gpu_reset(adev, id))
567                 return true;
568
569         return vm_flush_needed || gds_switch_needed;
570 }
571
572 static bool amdgpu_vm_is_large_bar(struct amdgpu_device *adev)
573 {
574         return (adev->gmc.real_vram_size == adev->gmc.visible_vram_size);
575 }
576
577 /**
578  * amdgpu_vm_flush - hardware flush the vm
579  *
580  * @ring: ring to use for flush
581  * @job: related job
582  * @need_pipe_sync: is a pipeline sync needed
583  *
584  * Emit a VM flush when it is necessary.
585  */
586 int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync)
587 {
588         struct amdgpu_device *adev = ring->adev;
589         unsigned vmhub = ring->funcs->vmhub;
590         struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
591         struct amdgpu_vmid *id = &id_mgr->ids[job->vmid];
592         bool gds_switch_needed = ring->funcs->emit_gds_switch && (
593                 id->gds_base != job->gds_base ||
594                 id->gds_size != job->gds_size ||
595                 id->gws_base != job->gws_base ||
596                 id->gws_size != job->gws_size ||
597                 id->oa_base != job->oa_base ||
598                 id->oa_size != job->oa_size);
599         bool vm_flush_needed = job->vm_needs_flush;
600         bool pasid_mapping_needed = id->pasid != job->pasid ||
601                 !id->pasid_mapping ||
602                 !dma_fence_is_signaled(id->pasid_mapping);
603         struct dma_fence *fence = NULL;
604         unsigned patch_offset = 0;
605         int r;
606
607         if (amdgpu_vmid_had_gpu_reset(adev, id)) {
608                 gds_switch_needed = true;
609                 vm_flush_needed = true;
610                 pasid_mapping_needed = true;
611         }
612
613         gds_switch_needed &= !!ring->funcs->emit_gds_switch;
614         vm_flush_needed &= !!ring->funcs->emit_vm_flush;
615         pasid_mapping_needed &= adev->gmc.gmc_funcs->emit_pasid_mapping &&
616                 ring->funcs->emit_wreg;
617
618         if (!vm_flush_needed && !gds_switch_needed && !need_pipe_sync)
619                 return 0;
620
621         if (ring->funcs->init_cond_exec)
622                 patch_offset = amdgpu_ring_init_cond_exec(ring);
623
624         if (need_pipe_sync)
625                 amdgpu_ring_emit_pipeline_sync(ring);
626
627         if (vm_flush_needed) {
628                 trace_amdgpu_vm_flush(ring, job->vmid, job->vm_pd_addr);
629                 amdgpu_ring_emit_vm_flush(ring, job->vmid, job->vm_pd_addr);
630         }
631
632         if (pasid_mapping_needed)
633                 amdgpu_gmc_emit_pasid_mapping(ring, job->vmid, job->pasid);
634
635         if (vm_flush_needed || pasid_mapping_needed) {
636                 r = amdgpu_fence_emit(ring, &fence, 0);
637                 if (r)
638                         return r;
639         }
640
641         if (vm_flush_needed) {
642                 mutex_lock(&id_mgr->lock);
643                 dma_fence_put(id->last_flush);
644                 id->last_flush = dma_fence_get(fence);
645                 id->current_gpu_reset_count =
646                         atomic_read(&adev->gpu_reset_counter);
647                 mutex_unlock(&id_mgr->lock);
648         }
649
650         if (pasid_mapping_needed) {
651                 id->pasid = job->pasid;
652                 dma_fence_put(id->pasid_mapping);
653                 id->pasid_mapping = dma_fence_get(fence);
654         }
655         dma_fence_put(fence);
656
657         if (ring->funcs->emit_gds_switch && gds_switch_needed) {
658                 id->gds_base = job->gds_base;
659                 id->gds_size = job->gds_size;
660                 id->gws_base = job->gws_base;
661                 id->gws_size = job->gws_size;
662                 id->oa_base = job->oa_base;
663                 id->oa_size = job->oa_size;
664                 amdgpu_ring_emit_gds_switch(ring, job->vmid, job->gds_base,
665                                             job->gds_size, job->gws_base,
666                                             job->gws_size, job->oa_base,
667                                             job->oa_size);
668         }
669
670         if (ring->funcs->patch_cond_exec)
671                 amdgpu_ring_patch_cond_exec(ring, patch_offset);
672
673         /* the double SWITCH_BUFFER here *cannot* be skipped by COND_EXEC */
674         if (ring->funcs->emit_switch_buffer) {
675                 amdgpu_ring_emit_switch_buffer(ring);
676                 amdgpu_ring_emit_switch_buffer(ring);
677         }
678         return 0;
679 }
680
681 /**
682  * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
683  *
684  * @vm: requested vm
685  * @bo: requested buffer object
686  *
687  * Find @bo inside the requested vm.
688  * Search inside the @bo's vm list for the requested vm
689  * Returns the found bo_va or NULL if none is found
690  *
691  * Object has to be reserved!
692  */
693 struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
694                                        struct amdgpu_bo *bo)
695 {
696         struct amdgpu_bo_va *bo_va;
697
698         list_for_each_entry(bo_va, &bo->va, base.bo_list) {
699                 if (bo_va->base.vm == vm) {
700                         return bo_va;
701                 }
702         }
703         return NULL;
704 }
705
706 /**
707  * amdgpu_vm_do_set_ptes - helper to call the right asic function
708  *
709  * @params: see amdgpu_pte_update_params definition
710  * @bo: PD/PT to update
711  * @pe: addr of the page entry
712  * @addr: dst addr to write into pe
713  * @count: number of page entries to update
714  * @incr: increase next addr by incr bytes
715  * @flags: hw access flags
716  *
717  * Traces the parameters and calls the right asic functions
718  * to setup the page table using the DMA.
719  */
720 static void amdgpu_vm_do_set_ptes(struct amdgpu_pte_update_params *params,
721                                   struct amdgpu_bo *bo,
722                                   uint64_t pe, uint64_t addr,
723                                   unsigned count, uint32_t incr,
724                                   uint64_t flags)
725 {
726         pe += amdgpu_bo_gpu_offset(bo);
727         trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags);
728
729         if (count < 3) {
730                 amdgpu_vm_write_pte(params->adev, params->ib, pe,
731                                     addr | flags, count, incr);
732
733         } else {
734                 amdgpu_vm_set_pte_pde(params->adev, params->ib, pe, addr,
735                                       count, incr, flags);
736         }
737 }
738
739 /**
740  * amdgpu_vm_do_copy_ptes - copy the PTEs from the GART
741  *
742  * @params: see amdgpu_pte_update_params definition
743  * @bo: PD/PT to update
744  * @pe: addr of the page entry
745  * @addr: dst addr to write into pe
746  * @count: number of page entries to update
747  * @incr: increase next addr by incr bytes
748  * @flags: hw access flags
749  *
750  * Traces the parameters and calls the DMA function to copy the PTEs.
751  */
752 static void amdgpu_vm_do_copy_ptes(struct amdgpu_pte_update_params *params,
753                                    struct amdgpu_bo *bo,
754                                    uint64_t pe, uint64_t addr,
755                                    unsigned count, uint32_t incr,
756                                    uint64_t flags)
757 {
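            /* params->src holds one 8 byte PTE per 4KB GPU page; addr >> 12
             * is the page index into that buffer.
             */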
758         uint64_t src = (params->src + (addr >> 12) * 8);
759
760         pe += amdgpu_bo_gpu_offset(bo);
761         trace_amdgpu_vm_copy_ptes(pe, src, count);
762
763         amdgpu_vm_copy_pte(params->adev, params->ib, pe, src, count);
764 }
765
766 /**
767  * amdgpu_vm_map_gart - Resolve gart mapping of addr
768  *
769  * @pages_addr: optional DMA address to use for lookup
770  * @addr: the unmapped addr
771  *
772  * Look up the physical address of the page that the pte resolves
773  * to and return the pointer for the page table entry.
774  */
775 static uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
776 {
777         uint64_t result;
778
779         /* page table offset */
780         result = pages_addr[addr >> PAGE_SHIFT];
781
782         /* in case cpu page size != gpu page size */
783         result |= addr & (~PAGE_MASK);
784
785         result &= 0xFFFFFFFFFFFFF000ULL;
786
787         return result;
788 }
789
790 /**
791  * amdgpu_vm_cpu_set_ptes - helper to update page tables via CPU
792  *
793  * @params: see amdgpu_pte_update_params definition
794  * @bo: PD/PT to update
795  * @pe: kmap addr of the page entry
796  * @addr: dst addr to write into pe
797  * @count: number of page entries to update
798  * @incr: increase next addr by incr bytes
799  * @flags: hw access flags
800  *
801  * Write count number of PT/PD entries directly.
802  */
803 static void amdgpu_vm_cpu_set_ptes(struct amdgpu_pte_update_params *params,
804                                    struct amdgpu_bo *bo,
805                                    uint64_t pe, uint64_t addr,
806                                    unsigned count, uint32_t incr,
807                                    uint64_t flags)
808 {
809         unsigned int i;
810         uint64_t value;
811
812         pe += (unsigned long)amdgpu_bo_kptr(bo);
813
814         trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags);
815
816         for (i = 0; i < count; i++) {
817                 value = params->pages_addr ?
818                         amdgpu_vm_map_gart(params->pages_addr, addr) :
819                         addr;
820                 amdgpu_gmc_set_pte_pde(params->adev, (void *)(uintptr_t)pe,
821                                        i, value, flags);
822                 addr += incr;
823         }
824 }
825
826 static int amdgpu_vm_wait_pd(struct amdgpu_device *adev, struct amdgpu_vm *vm,
827                              void *owner)
828 {
829         struct amdgpu_sync sync;
830         int r;
831
832         amdgpu_sync_create(&sync);
833         amdgpu_sync_resv(adev, &sync, vm->root.base.bo->tbo.resv, owner, false);
834         r = amdgpu_sync_wait(&sync, true);
835         amdgpu_sync_free(&sync);
836
837         return r;
838 }
839
840 /*
841  * amdgpu_vm_update_pde - update a single level in the hierarchy
842  *
843  * @params: parameters for the update
844  * @vm: requested vm
845  * @parent: parent directory
846  * @entry: entry to update
847  *
848  * Makes sure the requested entry in parent is up to date.
849  */
850 static void amdgpu_vm_update_pde(struct amdgpu_pte_update_params *params,
851                                  struct amdgpu_vm *vm,
852                                  struct amdgpu_vm_pt *parent,
853                                  struct amdgpu_vm_pt *entry)
854 {
855         struct amdgpu_bo *bo = parent->base.bo, *pbo;
856         uint64_t pde, pt, flags;
857         unsigned level;
858
859         /* Don't update huge pages here */
860         if (entry->huge)
861                 return;
862
863         for (level = 0, pbo = bo->parent; pbo; ++level)
864                 pbo = pbo->parent;
865
866         level += params->adev->vm_manager.root_level;
867         pt = amdgpu_bo_gpu_offset(entry->base.bo);
868         flags = AMDGPU_PTE_VALID;
869         amdgpu_gmc_get_vm_pde(params->adev, level, &pt, &flags);
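            /* Byte offset of the PDE inside the parent directory (8 bytes
             * per entry).
             */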
870         pde = (entry - parent->entries) * 8;
871         if (bo->shadow)
872                 params->func(params, bo->shadow, pde, pt, 1, 0, flags);
873         params->func(params, bo, pde, pt, 1, 0, flags);
874 }
875
876 /*
877  * amdgpu_vm_invalidate_level - mark all PD levels as invalid
878  *
879  * @parent: parent PD
880  *
881  * Mark all PD levels as invalid after an error.
882  */
883 static void amdgpu_vm_invalidate_level(struct amdgpu_device *adev,
884                                        struct amdgpu_vm *vm,
885                                        struct amdgpu_vm_pt *parent,
886                                        unsigned level)
887 {
888         unsigned pt_idx, num_entries;
889
890         /*
891          * Recurse into the subdirectories. This recursion is harmless because
892          * we only have a maximum of 5 layers.
893          */
894         num_entries = amdgpu_vm_num_entries(adev, level);
895         for (pt_idx = 0; pt_idx < num_entries; ++pt_idx) {
896                 struct amdgpu_vm_pt *entry = &parent->entries[pt_idx];
897
898                 if (!entry->base.bo)
899                         continue;
900
901                 spin_lock(&vm->status_lock);
902                 if (list_empty(&entry->base.vm_status))
903                         list_add(&entry->base.vm_status, &vm->relocated);
904                 spin_unlock(&vm->status_lock);
905                 amdgpu_vm_invalidate_level(adev, vm, entry, level + 1);
906         }
907 }
908
909 /*
910  * amdgpu_vm_update_directories - make sure that all directories are valid
911  *
912  * @adev: amdgpu_device pointer
913  * @vm: requested vm
914  *
915  * Makes sure all directories are up to date.
916  * Returns 0 for success, error for failure.
917  */
918 int amdgpu_vm_update_directories(struct amdgpu_device *adev,
919                                  struct amdgpu_vm *vm)
920 {
921         struct amdgpu_pte_update_params params;
922         struct amdgpu_job *job;
923         unsigned ndw = 0;
924         int r = 0;
925
926         if (list_empty(&vm->relocated))
927                 return 0;
928
929 restart:
930         memset(&params, 0, sizeof(params));
931         params.adev = adev;
932
933         if (vm->use_cpu_for_update) {
934                 r = amdgpu_vm_wait_pd(adev, vm, AMDGPU_FENCE_OWNER_VM);
935                 if (unlikely(r))
936                         return r;
937
938                 params.func = amdgpu_vm_cpu_set_ptes;
939         } else {
940                 ndw = 512 * 8;
941                 r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
942                 if (r)
943                         return r;
944
945                 params.ib = &job->ibs[0];
946                 params.func = amdgpu_vm_do_set_ptes;
947         }
948
949         spin_lock(&vm->status_lock);
950         while (!list_empty(&vm->relocated)) {
951                 struct amdgpu_vm_bo_base *bo_base, *parent;
952                 struct amdgpu_vm_pt *pt, *entry;
953                 struct amdgpu_bo *bo;
954
955                 bo_base = list_first_entry(&vm->relocated,
956                                            struct amdgpu_vm_bo_base,
957                                            vm_status);
958                 list_del_init(&bo_base->vm_status);
959                 spin_unlock(&vm->status_lock);
960
961                 bo = bo_base->bo->parent;
962                 if (!bo) {
963                         spin_lock(&vm->status_lock);
964                         continue;
965                 }
966
967                 parent = list_first_entry(&bo->va, struct amdgpu_vm_bo_base,
968                                           bo_list);
969                 pt = container_of(parent, struct amdgpu_vm_pt, base);
970                 entry = container_of(bo_base, struct amdgpu_vm_pt, base);
971
972                 amdgpu_vm_update_pde(&params, vm, pt, entry);
973
974                 spin_lock(&vm->status_lock);
975                 if (!vm->use_cpu_for_update &&
976                     (ndw - params.ib->length_dw) < 32)
977                         break;
978         }
979         spin_unlock(&vm->status_lock);
980
981         if (vm->use_cpu_for_update) {
982                 /* Flush HDP */
983                 mb();
984                 amdgpu_asic_flush_hdp(adev, NULL);
985         } else if (params.ib->length_dw == 0) {
986                 amdgpu_job_free(job);
987         } else {
988                 struct amdgpu_bo *root = vm->root.base.bo;
989                 struct amdgpu_ring *ring;
990                 struct dma_fence *fence;
991
992                 ring = container_of(vm->entity.sched, struct amdgpu_ring,
993                                     sched);
994
995                 amdgpu_ring_pad_ib(ring, params.ib);
996                 amdgpu_sync_resv(adev, &job->sync, root->tbo.resv,
997                                  AMDGPU_FENCE_OWNER_VM, false);
998                 WARN_ON(params.ib->length_dw > ndw);
999                 r = amdgpu_job_submit(job, ring, &vm->entity,
1000                                       AMDGPU_FENCE_OWNER_VM, &fence);
1001                 if (r)
1002                         goto error;
1003
1004                 amdgpu_bo_fence(root, fence, true);
1005                 dma_fence_put(vm->last_update);
1006                 vm->last_update = fence;
1007         }
1008
1009         if (!list_empty(&vm->relocated))
1010                 goto restart;
1011
1012         return 0;
1013
1014 error:
1015         amdgpu_vm_invalidate_level(adev, vm, &vm->root,
1016                                    adev->vm_manager.root_level);
1017         amdgpu_job_free(job);
1018         return r;
1019 }
1020
1021 /**
1022  * amdgpu_vm_get_entry - find the entry for an address
1023  *
1024  * @p: see amdgpu_pte_update_params definition
1025  * @addr: virtual address in question
1026  * @entry: resulting entry or NULL
1027  * @parent: parent entry
1028  *
1029  * Find the vm_pt entry and its parent for the given address.
1030  */
1031 void amdgpu_vm_get_entry(struct amdgpu_pte_update_params *p, uint64_t addr,
1032                          struct amdgpu_vm_pt **entry,
1033                          struct amdgpu_vm_pt **parent)
1034 {
1035         unsigned level = p->adev->vm_manager.root_level;
1036
1037         *parent = NULL;
1038         *entry = &p->vm->root;
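             /* Walk down from the root, using the top address bits as the
              * index at each level, until the leaf PTB entry is reached.
              */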
1039         while ((*entry)->entries) {
1040                 unsigned shift = amdgpu_vm_level_shift(p->adev, level++);
1041
1042                 *parent = *entry;
1043                 *entry = &(*entry)->entries[addr >> shift];
1044                 addr &= (1ULL << shift) - 1;
1045         }
1046
1047         if (level != AMDGPU_VM_PTB)
1048                 *entry = NULL;
1049 }
1050
1051 /**
1052  * amdgpu_vm_handle_huge_pages - handle updating the PD with huge pages
1053  *
1054  * @p: see amdgpu_pte_update_params definition
1055  * @entry: vm_pt entry to check
1056  * @parent: parent entry
1057  * @nptes: number of PTEs updated with this operation
1058  * @dst: destination address where the PTEs should point to
1059  * @flags: access flags for the PTEs
1060  *
1061  * Check if we can update the PD with a huge page.
1062  */
1063 static void amdgpu_vm_handle_huge_pages(struct amdgpu_pte_update_params *p,
1064                                         struct amdgpu_vm_pt *entry,
1065                                         struct amdgpu_vm_pt *parent,
1066                                         unsigned nptes, uint64_t dst,
1067                                         uint64_t flags)
1068 {
1069         uint64_t pde;
1070
1071         /* In the case of a mixed PT the PDE must point to it */
1072         if (p->adev->asic_type >= CHIP_VEGA10 && !p->src &&
1073             nptes == AMDGPU_VM_PTE_COUNT(p->adev)) {
1074                 /* Set the huge page flag to stop scanning at this PDE */
1075                 flags |= AMDGPU_PDE_PTE;
1076         }
1077
1078         if (!(flags & AMDGPU_PDE_PTE)) {
1079                 if (entry->huge) {
1080                         /* Add the entry to the relocated list to update it. */
1081                         entry->huge = false;
1082                         spin_lock(&p->vm->status_lock);
1083                         list_move(&entry->base.vm_status, &p->vm->relocated);
1084                         spin_unlock(&p->vm->status_lock);
1085                 }
1086                 return;
1087         }
1088
1089         entry->huge = true;
1090         amdgpu_gmc_get_vm_pde(p->adev, AMDGPU_VM_PDB0, &dst, &flags);
1091
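             /* Write the huge mapping directly into the parent PD entry,
              * 8 bytes per PDE.
              */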
1092         pde = (entry - parent->entries) * 8;
1093         if (parent->base.bo->shadow)
1094                 p->func(p, parent->base.bo->shadow, pde, dst, 1, 0, flags);
1095         p->func(p, parent->base.bo, pde, dst, 1, 0, flags);
1096 }
1097
1098 /**
1099  * amdgpu_vm_update_ptes - make sure that page tables are valid
1100  *
1101  * @params: see amdgpu_pte_update_params definition
1102  * @vm: requested vm
1103  * @start: start of GPU address range
1104  * @end: end of GPU address range
1105  * @dst: destination address to map to, the next dst inside the function
1106  * @flags: mapping flags
1107  *
1108  * Update the page tables in the range @start - @end.
1109  * Returns 0 for success, -EINVAL for failure.
1110  */
1111 static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
1112                                   uint64_t start, uint64_t end,
1113                                   uint64_t dst, uint64_t flags)
1114 {
1115         struct amdgpu_device *adev = params->adev;
1116         const uint64_t mask = AMDGPU_VM_PTE_COUNT(adev) - 1;
1117
1118         uint64_t addr, pe_start;
1119         struct amdgpu_bo *pt;
1120         unsigned nptes;
1121
1122         /* walk over the address space and update the page tables */
1123         for (addr = start; addr < end; addr += nptes,
1124              dst += nptes * AMDGPU_GPU_PAGE_SIZE) {
1125                 struct amdgpu_vm_pt *entry, *parent;
1126
1127                 amdgpu_vm_get_entry(params, addr, &entry, &parent);
1128                 if (!entry)
1129                         return -ENOENT;
1130
1131                 if ((addr & ~mask) == (end & ~mask))
1132                         nptes = end - addr;
1133                 else
1134                         nptes = AMDGPU_VM_PTE_COUNT(adev) - (addr & mask);
1135
1136                 amdgpu_vm_handle_huge_pages(params, entry, parent,
1137                                             nptes, dst, flags);
1138                 /* We don't need to update PTEs for huge pages */
1139                 if (entry->huge)
1140                         continue;
1141
1142                 pt = entry->base.bo;
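                     /* byte offset of the first PTE to update inside the PT */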
1143                 pe_start = (addr & mask) * 8;
1144                 if (pt->shadow)
1145                         params->func(params, pt->shadow, pe_start, dst, nptes,
1146                                      AMDGPU_GPU_PAGE_SIZE, flags);
1147                 params->func(params, pt, pe_start, dst, nptes,
1148                              AMDGPU_GPU_PAGE_SIZE, flags);
1149         }
1150
1151         return 0;
1152 }
1153
1154 /*
1155  * amdgpu_vm_frag_ptes - add fragment information to PTEs
1156  *
1157  * @params: see amdgpu_pte_update_params definition
1158  * @vm: requested vm
1159  * @start: first PTE to handle
1160  * @end: last PTE to handle
1161  * @dst: addr those PTEs should point to
1162  * @flags: hw mapping flags
1163  * Returns 0 for success, -EINVAL for failure.
1164  */
1165 static int amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params  *params,
1166                                 uint64_t start, uint64_t end,
1167                                 uint64_t dst, uint64_t flags)
1168 {
1169         /**
1170          * The MC L1 TLB supports variable sized pages, based on a fragment
1171          * field in the PTE. When this field is set to a non-zero value, page
1172          * granularity is increased from 4KB to (1 << (12 + frag)). The PTE
1173          * flags are considered valid for all PTEs within the fragment range
1174          * and corresponding mappings are assumed to be physically contiguous.
1175          *
1176          * The L1 TLB can store a single PTE for the whole fragment,
1177          * significantly increasing the space available for translation
1178          * caching. This leads to large improvements in throughput when the
1179          * TLB is under pressure.
1180          *
1181          * The L2 TLB distributes small and large fragments into two
1182          * asymmetric partitions. The large fragment cache is significantly
1183          * larger. Thus, we try to use large fragments wherever possible.
1184          * Userspace can support this by aligning virtual base address and
1185          * allocation size to the fragment size.
1186          */
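             /* Example: frag = 9 gives 1 << (12 + 9) = 2MB granularity, so a
              * fully aligned 2MB mapping can be covered by a single cached
              * TLB entry.
              */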
1187         unsigned max_frag = params->adev->vm_manager.fragment_size;
1188         int r;
1189
1190         /* system pages are non-contiguous */
1191         if (params->src || !(flags & AMDGPU_PTE_VALID))
1192                 return amdgpu_vm_update_ptes(params, start, end, dst, flags);
1193
1194         while (start != end) {
1195                 uint64_t frag_flags, frag_end;
1196                 unsigned frag;
1197
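                     /* Pick the largest power-of-two fragment that is both
                      * aligned at start and no larger than the remaining
                      * range.
                      */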
1198                 /* This intentionally wraps around if no bit is set */
1199                 frag = min((unsigned)ffs(start) - 1,
1200                            (unsigned)fls64(end - start) - 1);
1201                 if (frag >= max_frag) {
1202                         frag_flags = AMDGPU_PTE_FRAG(max_frag);
1203                         frag_end = end & ~((1ULL << max_frag) - 1);
1204                 } else {
1205                         frag_flags = AMDGPU_PTE_FRAG(frag);
1206                         frag_end = start + (1 << frag);
1207                 }
1208
1209                 r = amdgpu_vm_update_ptes(params, start, frag_end, dst,
1210                                           flags | frag_flags);
1211                 if (r)
1212                         return r;
1213
1214                 dst += (frag_end - start) * AMDGPU_GPU_PAGE_SIZE;
1215                 start = frag_end;
1216         }
1217
1218         return 0;
1219 }
1220
1221 /**
1222  * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
1223  *
1224  * @adev: amdgpu_device pointer
1225  * @exclusive: fence we need to sync to
1226  * @pages_addr: DMA addresses to use for mapping
1227  * @vm: requested vm
1228  * @start: start of mapped range
1229  * @last: last mapped entry
1230  * @flags: flags for the entries
1231  * @addr: addr to set the area to
1232  * @fence: optional resulting fence
1233  *
1234  * Fill in the page table entries between @start and @last.
1235  * Returns 0 for success, -EINVAL for failure.
1236  */
1237 static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
1238                                        struct dma_fence *exclusive,
1239                                        dma_addr_t *pages_addr,
1240                                        struct amdgpu_vm *vm,
1241                                        uint64_t start, uint64_t last,
1242                                        uint64_t flags, uint64_t addr,
1243                                        struct dma_fence **fence)
1244 {
1245         struct amdgpu_ring *ring;
1246         void *owner = AMDGPU_FENCE_OWNER_VM;
1247         unsigned nptes, ncmds, ndw;
1248         struct amdgpu_job *job;
1249         struct amdgpu_pte_update_params params;
1250         struct dma_fence *f = NULL;
1251         int r;
1252
1253         memset(&params, 0, sizeof(params));
1254         params.adev = adev;
1255         params.vm = vm;
1256
1257         /* sync to everything on unmapping */
1258         if (!(flags & AMDGPU_PTE_VALID))
1259                 owner = AMDGPU_FENCE_OWNER_UNDEFINED;
1260
1261         if (vm->use_cpu_for_update) {
1262                 /* params.src is used as a flag to indicate system memory */
1263                 if (pages_addr)
1264                         params.src = ~0;
1265
1266                 /* Wait for PT BOs to be free. PTs share the same resv object
1267                  * as the root PD BO.
1268                  */
1269                 r = amdgpu_vm_wait_pd(adev, vm, owner);
1270                 if (unlikely(r))
1271                         return r;
1272
1273                 params.func = amdgpu_vm_cpu_set_ptes;
1274                 params.pages_addr = pages_addr;
1275                 return amdgpu_vm_frag_ptes(&params, start, last + 1,
1276                                            addr, flags);
1277         }
1278
1279         ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
1280
1281         nptes = last - start + 1;
1282
1283         /*
1284          * reserve space for two commands every (1 << BLOCK_SIZE)
1285          * entries or 2k dwords (whichever is smaller)
1286          *
1287          * The second command is for the shadow pagetables.
1288          */
1289         if (vm->root.base.bo->shadow)
1290                 ncmds = ((nptes >> min(adev->vm_manager.block_size, 11u)) + 1) * 2;
1291         else
1292                 ncmds = ((nptes >> min(adev->vm_manager.block_size, 11u)) + 1);
1293
1294         /* padding, etc. */
1295         ndw = 64;
1296
1297         if (pages_addr) {
1298                 /* copy commands needed */
1299                 ndw += ncmds * adev->vm_manager.vm_pte_funcs->copy_pte_num_dw;
1300
1301                 /* and also PTEs */
1302                 ndw += nptes * 2;
1303
1304                 params.func = amdgpu_vm_do_copy_ptes;
1305
1306         } else {
1307                 /* set page commands needed */
1308                 ndw += ncmds * 10;
1309
1310                 /* extra commands for begin/end fragments */
1311                 ndw += 2 * 10 * adev->vm_manager.fragment_size;
1312
1313                 params.func = amdgpu_vm_do_set_ptes;
1314         }
1315
1316         r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
1317         if (r)
1318                 return r;
1319
1320         params.ib = &job->ibs[0];
1321
1322         if (pages_addr) {
1323                 uint64_t *pte;
1324                 unsigned i;
1325
1326                 /* Put the PTEs at the end of the IB. */
1327                 i = ndw - nptes * 2;
1328                 pte = (uint64_t *)&(job->ibs->ptr[i]);
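                     /* ibs->gpu_addr is a byte address while ptr[] is indexed
                      * in dwords, hence the i * 4.
                      */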
1329                 params.src = job->ibs->gpu_addr + i * 4;
1330
1331                 for (i = 0; i < nptes; ++i) {
1332                         pte[i] = amdgpu_vm_map_gart(pages_addr, addr + i *
1333                                                     AMDGPU_GPU_PAGE_SIZE);
1334                         pte[i] |= flags;
1335                 }
1336                 addr = 0;
1337         }
1338
1339         r = amdgpu_sync_fence(adev, &job->sync, exclusive, false);
1340         if (r)
1341                 goto error_free;
1342
1343         r = amdgpu_sync_resv(adev, &job->sync, vm->root.base.bo->tbo.resv,
1344                              owner, false);
1345         if (r)
1346                 goto error_free;
1347
1348         r = reservation_object_reserve_shared(vm->root.base.bo->tbo.resv);
1349         if (r)
1350                 goto error_free;
1351
1352         r = amdgpu_vm_frag_ptes(&params, start, last + 1, addr, flags);
1353         if (r)
1354                 goto error_free;
1355
1356         amdgpu_ring_pad_ib(ring, params.ib);
1357         WARN_ON(params.ib->length_dw > ndw);
1358         r = amdgpu_job_submit(job, ring, &vm->entity,
1359                               AMDGPU_FENCE_OWNER_VM, &f);
1360         if (r)
1361                 goto error_free;
1362
1363         amdgpu_bo_fence(vm->root.base.bo, f, true);
1364         dma_fence_put(*fence);
1365         *fence = f;
1366         return 0;
1367
1368 error_free:
1369         amdgpu_job_free(job);
1370         return r;
1371 }
1372
1373 /**
1374  * amdgpu_vm_bo_split_mapping - split a mapping into smaller chunks
1375  *
1376  * @adev: amdgpu_device pointer
1377  * @exclusive: fence we need to sync to
1378  * @pages_addr: DMA addresses to use for mapping
1379  * @vm: requested vm
1380  * @mapping: mapped range and flags to use for the update
1381  * @flags: HW flags for the mapping
1382  * @nodes: array of drm_mm_nodes with the MC addresses
1383  * @fence: optional resulting fence
1384  *
1385  * Split the mapping into smaller chunks so that each update fits
1386  * into a SDMA IB.
1387  * Returns 0 for success, -EINVAL for failure.
1388  */
1389 static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
1390                                       struct dma_fence *exclusive,
1391                                       dma_addr_t *pages_addr,
1392                                       struct amdgpu_vm *vm,
1393                                       struct amdgpu_bo_va_mapping *mapping,
1394                                       uint64_t flags,
1395                                       struct drm_mm_node *nodes,
1396                                       struct dma_fence **fence)
1397 {
1398         unsigned min_linear_pages = 1 << adev->vm_manager.fragment_size;
1399         uint64_t pfn, start = mapping->start;
1400         int r;
1401
1402         /* Normally, bo_va->flags only contains the READABLE and WRITEABLE
1403          * bits, but filter the flags here first just in case.
1404          */
1405         if (!(mapping->flags & AMDGPU_PTE_READABLE))
1406                 flags &= ~AMDGPU_PTE_READABLE;
1407         if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
1408                 flags &= ~AMDGPU_PTE_WRITEABLE;
1409
1410         flags &= ~AMDGPU_PTE_EXECUTABLE;
1411         flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
1412
1413         flags &= ~AMDGPU_PTE_MTYPE_MASK;
1414         flags |= (mapping->flags & AMDGPU_PTE_MTYPE_MASK);
1415
1416         if ((mapping->flags & AMDGPU_PTE_PRT) &&
1417             (adev->asic_type >= CHIP_VEGA10)) {
1418                 flags |= AMDGPU_PTE_PRT;
1419                 flags &= ~AMDGPU_PTE_VALID;
1420         }
1421
1422         trace_amdgpu_vm_bo_update(mapping);
1423
1424         pfn = mapping->offset >> PAGE_SHIFT;
1425         if (nodes) {
1426                 while (pfn >= nodes->size) {
1427                         pfn -= nodes->size;
1428                         ++nodes;
1429                 }
1430         }
1431
1432         do {
1433                 dma_addr_t *dma_addr = NULL;
1434                 uint64_t max_entries;
1435                 uint64_t addr, last;
1436
1437                 if (nodes) {
1438                         addr = nodes->start << PAGE_SHIFT;
1439                         max_entries = (nodes->size - pfn) *
1440                                 (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
1441                 } else {
1442                         addr = 0;
1443                         max_entries = S64_MAX;
1444                 }
1445
1446                 if (pages_addr) {
1447                         uint64_t count;
1448
1449                         max_entries = min(max_entries, 16ull * 1024ull);
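                             /* Count how many of the following pages are
                              * physically contiguous; if enough, map them as
                              * one linear range instead of page by page.
                              */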
1450                         for (count = 1; count < max_entries; ++count) {
1451                                 uint64_t idx = pfn + count;
1452
1453                                 if (pages_addr[idx] !=
1454                                     (pages_addr[idx - 1] + PAGE_SIZE))
1455                                         break;
1456                         }
1457
1458                         if (count < min_linear_pages) {
1459                                 addr = pfn << PAGE_SHIFT;
1460                                 dma_addr = pages_addr;
1461                         } else {
1462                                 addr = pages_addr[pfn];
1463                                 max_entries = count;
1464                         }
1465
1466                 } else if (flags & AMDGPU_PTE_VALID) {
1467                         addr += adev->vm_manager.vram_base_offset;
1468                         addr += pfn << PAGE_SHIFT;
1469                 }
1470
1471                 last = min((uint64_t)mapping->last, start + max_entries - 1);
1472                 r = amdgpu_vm_bo_update_mapping(adev, exclusive, dma_addr, vm,
1473                                                 start, last, flags, addr,
1474                                                 fence);
1475                 if (r)
1476                         return r;
1477
1478                 pfn += last - start + 1;
1479                 if (nodes && nodes->size == pfn) {
1480                         pfn = 0;
1481                         ++nodes;
1482                 }
1483                 start = last + 1;
1484
1485         } while (unlikely(start != mapping->last + 1));
1486
1487         return 0;
1488 }
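
/*
 * Illustrative sketch (hypothetical helper, not part of the driver): the
 * contiguity scan used in the pages_addr branch of
 * amdgpu_vm_bo_split_mapping() above, pulled out for clarity. It counts how
 * many system pages starting at @pfn are adjacent in DMA address space,
 * which decides whether a chunk can be mapped linearly or needs per-page
 * addresses.
 */
static uint64_t example_count_contiguous_pages(const dma_addr_t *pages_addr,
                                               uint64_t pfn,
                                               uint64_t max_entries)
{
        uint64_t count;

        for (count = 1; count < max_entries; ++count) {
                /* stop as soon as the next page is not physically adjacent */
                if (pages_addr[pfn + count] !=
                    pages_addr[pfn + count - 1] + PAGE_SIZE)
                        break;
        }
        return count;
}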
1489
1490 /**
1491  * amdgpu_vm_bo_update - update all BO mappings in the vm page table
1492  *
1493  * @adev: amdgpu_device pointer
1494  * @bo_va: requested BO and VM object
1495  * @clear: if true clear the entries
1496  *
1497  * Fill in the page table entries for @bo_va.
1498  * Returns 0 for success, -EINVAL for failure.
1499  */
1500 int amdgpu_vm_bo_update(struct amdgpu_device *adev,
1501                         struct amdgpu_bo_va *bo_va,
1502                         bool clear)
1503 {
1504         struct amdgpu_bo *bo = bo_va->base.bo;
1505         struct amdgpu_vm *vm = bo_va->base.vm;
1506         struct amdgpu_bo_va_mapping *mapping;
1507         dma_addr_t *pages_addr = NULL;
1508         struct ttm_mem_reg *mem;
1509         struct drm_mm_node *nodes;
1510         struct dma_fence *exclusive, **last_update;
1511         uint64_t flags;
1512         uint32_t mem_type;
1513         int r;
1514
1515         if (clear || !bo_va->base.bo) {
1516                 mem = NULL;
1517                 nodes = NULL;
1518                 exclusive = NULL;
1519         } else {
1520                 struct ttm_dma_tt *ttm;
1521
1522                 mem = &bo_va->base.bo->tbo.mem;
1523                 nodes = mem->mm_node;
1524                 if (mem->mem_type == TTM_PL_TT) {
1525                         ttm = container_of(bo_va->base.bo->tbo.ttm,
1526                                            struct ttm_dma_tt, ttm);
1527                         pages_addr = ttm->dma_address;
1528                 }
1529                 exclusive = reservation_object_get_excl(bo->tbo.resv);
1530         }
1531
1532         if (bo)
1533                 flags = amdgpu_ttm_tt_pte_flags(adev, bo->tbo.ttm, mem);
1534         else
1535                 flags = 0x0;
1536
1537         if (clear || (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv))
1538                 last_update = &vm->last_update;
1539         else
1540                 last_update = &bo_va->last_pt_update;
1541
1542         if (!clear && bo_va->base.moved) {
1543                 bo_va->base.moved = false;
1544                 list_splice_init(&bo_va->valids, &bo_va->invalids);
1545
1546         } else if (bo_va->cleared != clear) {
1547                 list_splice_init(&bo_va->valids, &bo_va->invalids);
1548         }
1549
1550         list_for_each_entry(mapping, &bo_va->invalids, list) {
1551                 r = amdgpu_vm_bo_split_mapping(adev, exclusive, pages_addr, vm,
1552                                                mapping, flags, nodes,
1553                                                last_update);
1554                 if (r)
1555                         return r;
1556         }
1557
1558         if (vm->use_cpu_for_update) {
1559                 /* Flush HDP */
1560                 mb();
1561                 amdgpu_asic_flush_hdp(adev, NULL);
1562         }
1563
1564         spin_lock(&vm->status_lock);
1565         list_del_init(&bo_va->base.vm_status);
1566
1567         /* If the BO is not in its preferred location add it back to
1568          * the evicted list so that it gets validated again on the
1569          * next command submission.
1570          */
1571         mem_type = bo ? bo->tbo.mem.mem_type : 0;
1572         if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv &&
1573             !(bo->preferred_domains & amdgpu_mem_type_to_domain(mem_type)))
1574                 list_add_tail(&bo_va->base.vm_status, &vm->evicted);
1575         spin_unlock(&vm->status_lock);
1576
1577         list_splice_init(&bo_va->invalids, &bo_va->valids);
1578         bo_va->cleared = clear;
1579
1580         if (trace_amdgpu_vm_bo_mapping_enabled()) {
1581                 list_for_each_entry(mapping, &bo_va->valids, list)
1582                         trace_amdgpu_vm_bo_mapping(mapping);
1583         }
1584
1585         return 0;
1586 }
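
/*
 * Illustrative sketch (hypothetical helper, not part of the driver): the two
 * ways amdgpu_vm_bo_update() is used above and in amdgpu_vm_handle_moved().
 * With clear=false the current mappings are written into the page tables;
 * with clear=true the entries are cleared instead, e.g. when the BO's
 * reservation could not be taken.
 */
static int example_update_or_clear(struct amdgpu_device *adev,
                                   struct amdgpu_bo_va *bo_va,
                                   bool bo_is_reserved)
{
        /* PTs must be reserved; clear the entries if the BO itself is not */
        return amdgpu_vm_bo_update(adev, bo_va, !bo_is_reserved);
}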
1587
1588 /**
1589  * amdgpu_vm_update_prt_state - update the global PRT state
1590  */
1591 static void amdgpu_vm_update_prt_state(struct amdgpu_device *adev)
1592 {
1593         unsigned long flags;
1594         bool enable;
1595
1596         spin_lock_irqsave(&adev->vm_manager.prt_lock, flags);
1597         enable = !!atomic_read(&adev->vm_manager.num_prt_users);
1598         adev->gmc.gmc_funcs->set_prt(adev, enable);
1599         spin_unlock_irqrestore(&adev->vm_manager.prt_lock, flags);
1600 }
1601
1602 /**
1603  * amdgpu_vm_prt_get - add a PRT user
1604  */
1605 static void amdgpu_vm_prt_get(struct amdgpu_device *adev)
1606 {
1607         if (!adev->gmc.gmc_funcs->set_prt)
1608                 return;
1609
1610         if (atomic_inc_return(&adev->vm_manager.num_prt_users) == 1)
1611                 amdgpu_vm_update_prt_state(adev);
1612 }
1613
1614 /**
1615  * amdgpu_vm_prt_put - drop a PRT user
1616  */
1617 static void amdgpu_vm_prt_put(struct amdgpu_device *adev)
1618 {
1619         if (atomic_dec_return(&adev->vm_manager.num_prt_users) == 0)
1620                 amdgpu_vm_update_prt_state(adev);
1621 }
1622
1623 /**
1624  * amdgpu_vm_prt_cb - callback for updating the PRT status
1625  */
1626 static void amdgpu_vm_prt_cb(struct dma_fence *fence, struct dma_fence_cb *_cb)
1627 {
1628         struct amdgpu_prt_cb *cb = container_of(_cb, struct amdgpu_prt_cb, cb);
1629
1630         amdgpu_vm_prt_put(cb->adev);
1631         kfree(cb);
1632 }
1633
1634 /**
1635  * amdgpu_vm_add_prt_cb - add callback for updating the PRT status
1636  */
1637 static void amdgpu_vm_add_prt_cb(struct amdgpu_device *adev,
1638                                  struct dma_fence *fence)
1639 {
1640         struct amdgpu_prt_cb *cb;
1641
1642         if (!adev->gmc.gmc_funcs->set_prt)
1643                 return;
1644
1645         cb = kmalloc(sizeof(struct amdgpu_prt_cb), GFP_KERNEL);
1646         if (!cb) {
1647                 /* Last resort when we are OOM */
1648                 if (fence)
1649                         dma_fence_wait(fence, false);
1650
1651                 amdgpu_vm_prt_put(adev);
1652         } else {
1653                 cb->adev = adev;
1654                 if (!fence || dma_fence_add_callback(fence, &cb->cb,
1655                                                      amdgpu_vm_prt_cb))
1656                         amdgpu_vm_prt_cb(fence, &cb->cb);
1657         }
1658 }
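
/*
 * Illustrative sketch (hypothetical helper, not part of the driver): the PRT
 * reference-counting pattern implemented by the helpers above. A reference
 * is taken when a PRT mapping is created and dropped only once the fence of
 * the corresponding unmap work has signaled, so set_prt() stays enabled
 * while any PRT mapping or pending teardown exists.
 */
static void example_prt_refcount_pattern(struct amdgpu_device *adev,
                                         struct dma_fence *unmap_fence)
{
        amdgpu_vm_prt_get(adev);                 /* mapping created */
        amdgpu_vm_add_prt_cb(adev, unmap_fence); /* dropped when fence signals */
}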
1659
1660 /**
1661  * amdgpu_vm_free_mapping - free a mapping
1662  *
1663  * @adev: amdgpu_device pointer
1664  * @vm: requested vm
1665  * @mapping: mapping to be freed
1666  * @fence: fence of the unmap operation
1667  *
1668  * Free a mapping and make sure we decrease the PRT usage count if applicable.
1669  */
1670 static void amdgpu_vm_free_mapping(struct amdgpu_device *adev,
1671                                    struct amdgpu_vm *vm,
1672                                    struct amdgpu_bo_va_mapping *mapping,
1673                                    struct dma_fence *fence)
1674 {
1675         if (mapping->flags & AMDGPU_PTE_PRT)
1676                 amdgpu_vm_add_prt_cb(adev, fence);
1677         kfree(mapping);
1678 }
1679
1680 /**
1681  * amdgpu_vm_prt_fini - finish all prt mappings
1682  *
1683  * @adev: amdgpu_device pointer
1684  * @vm: requested vm
1685  *
1686  * Register a cleanup callback to disable PRT support after VM dies.
1687  */
1688 static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
1689 {
1690         struct reservation_object *resv = vm->root.base.bo->tbo.resv;
1691         struct dma_fence *excl, **shared;
1692         unsigned i, shared_count;
1693         int r;
1694
1695         r = reservation_object_get_fences_rcu(resv, &excl,
1696                                               &shared_count, &shared);
1697         if (r) {
1698                 /* Not enough memory to grab the fence list, as last resort
1699                  * block for all the fences to complete.
1700                  */
1701                 reservation_object_wait_timeout_rcu(resv, true, false,
1702                                                     MAX_SCHEDULE_TIMEOUT);
1703                 return;
1704         }
1705
1706         /* Add a callback for each fence in the reservation object */
1707         amdgpu_vm_prt_get(adev);
1708         amdgpu_vm_add_prt_cb(adev, excl);
1709
1710         for (i = 0; i < shared_count; ++i) {
1711                 amdgpu_vm_prt_get(adev);
1712                 amdgpu_vm_add_prt_cb(adev, shared[i]);
1713         }
1714
1715         kfree(shared);
1716 }
1717
1718 /**
1719  * amdgpu_vm_clear_freed - clear freed BOs in the PT
1720  *
1721  * @adev: amdgpu_device pointer
1722  * @vm: requested vm
1723  * @fence: optional resulting fence (unchanged if no work needed to be done
1724  * or if an error occurred)
1725  *
1726  * Make sure all freed BOs are cleared in the PT.
1727  * Returns 0 for success.
1728  *
1729  * PTs have to be reserved and mutex must be locked!
1730  */
1731 int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
1732                           struct amdgpu_vm *vm,
1733                           struct dma_fence **fence)
1734 {
1735         struct amdgpu_bo_va_mapping *mapping;
1736         uint64_t init_pte_value = 0;
1737         struct dma_fence *f = NULL;
1738         int r;
1739
1740         while (!list_empty(&vm->freed)) {
1741                 mapping = list_first_entry(&vm->freed,
1742                         struct amdgpu_bo_va_mapping, list);
1743                 list_del(&mapping->list);
1744
1745                 if (vm->pte_support_ats && mapping->start < AMDGPU_VA_HOLE_START)
1746                         init_pte_value = AMDGPU_PTE_DEFAULT_ATC;
1747
1748                 r = amdgpu_vm_bo_update_mapping(adev, NULL, NULL, vm,
1749                                                 mapping->start, mapping->last,
1750                                                 init_pte_value, 0, &f);
1751                 amdgpu_vm_free_mapping(adev, vm, mapping, f);
1752                 if (r) {
1753                         dma_fence_put(f);
1754                         return r;
1755                 }
1756         }
1757
1758         if (fence && f) {
1759                 dma_fence_put(*fence);
1760                 *fence = f;
1761         } else {
1762                 dma_fence_put(f);
1763         }
1764
1765         return 0;
1766
1767 }
1768
1769 /**
1770  * amdgpu_vm_handle_moved - handle moved BOs in the PT
1771  *
1772  * @adev: amdgpu_device pointer
1773  * @vm: requested vm
1775  *
1776  * Make sure all BOs which are moved are updated in the PTs.
1777  * Returns 0 for success.
1778  *
1779  * PTs have to be reserved!
1780  */
1781 int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
1782                            struct amdgpu_vm *vm)
1783 {
1784         bool clear;
1785         int r = 0;
1786
1787         spin_lock(&vm->status_lock);
1788         while (!list_empty(&vm->moved)) {
1789                 struct amdgpu_bo_va *bo_va;
1790                 struct reservation_object *resv;
1791
1792                 bo_va = list_first_entry(&vm->moved,
1793                         struct amdgpu_bo_va, base.vm_status);
1794                 spin_unlock(&vm->status_lock);
1795
1796                 resv = bo_va->base.bo->tbo.resv;
1797
1798                 /* Per VM BOs never need to be cleared in the page tables */
1799                 if (resv == vm->root.base.bo->tbo.resv)
1800                         clear = false;
1801                 /* Try to reserve the BO to avoid clearing its ptes */
1802                 else if (!amdgpu_vm_debug && reservation_object_trylock(resv))
1803                         clear = false;
1804                 /* Somebody else is using the BO right now */
1805                 else
1806                         clear = true;
1807
1808                 r = amdgpu_vm_bo_update(adev, bo_va, clear);
1809                 if (r)
1810                         return r;
1811
1812                 if (!clear && resv != vm->root.base.bo->tbo.resv)
1813                         reservation_object_unlock(resv);
1814
1815                 spin_lock(&vm->status_lock);
1816         }
1817         spin_unlock(&vm->status_lock);
1818
1819         return r;
1820 }
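
/*
 * Illustrative sketch (hypothetical helper, not part of the driver): how a
 * command submission path typically chains the two housekeeping helpers
 * above, first clearing the page table entries of freed mappings and then
 * updating the page tables of BOs that moved since the last submission.
 * Fence handling is reduced to the minimum for brevity; the PTs have to be
 * reserved by the caller as noted above.
 */
static int example_vm_housekeeping(struct amdgpu_device *adev,
                                   struct amdgpu_vm *vm)
{
        struct dma_fence *fence = NULL;
        int r;

        r = amdgpu_vm_clear_freed(adev, vm, &fence);
        if (r)
                return r;
        dma_fence_put(fence);

        return amdgpu_vm_handle_moved(adev, vm);
}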
1821
1822 /**
1823  * amdgpu_vm_bo_add - add a bo to a specific vm
1824  *
1825  * @adev: amdgpu_device pointer
1826  * @vm: requested vm
1827  * @bo: amdgpu buffer object
1828  *
1829  * Add @bo into the requested vm.
1830  * Add @bo to the list of bos associated with the vm
1831  * Returns newly added bo_va or NULL for failure
1832  *
1833  * Object has to be reserved!
1834  */
1835 struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
1836                                       struct amdgpu_vm *vm,
1837                                       struct amdgpu_bo *bo)
1838 {
1839         struct amdgpu_bo_va *bo_va;
1840
1841         bo_va = kzalloc(sizeof(struct amdgpu_bo_va), GFP_KERNEL);
1842         if (bo_va == NULL) {
1843                 return NULL;
1844         }
1845         bo_va->base.vm = vm;
1846         bo_va->base.bo = bo;
1847         INIT_LIST_HEAD(&bo_va->base.bo_list);
1848         INIT_LIST_HEAD(&bo_va->base.vm_status);
1849
1850         bo_va->ref_count = 1;
1851         INIT_LIST_HEAD(&bo_va->valids);
1852         INIT_LIST_HEAD(&bo_va->invalids);
1853
1854         if (!bo)
1855                 return bo_va;
1856
1857         list_add_tail(&bo_va->base.bo_list, &bo->va);
1858
1859         if (bo->tbo.resv != vm->root.base.bo->tbo.resv)
1860                 return bo_va;
1861
1862         if (bo->preferred_domains &
1863             amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type))
1864                 return bo_va;
1865
1866         /*
1867          * We checked all the prerequisites, but it looks like this per VM BO
1868          * is currently evicted. Add the BO to the evicted list to make sure it
1869          * is validated on the next VM use to avoid faults.
1870          */
1871         spin_lock(&vm->status_lock);
1872         list_move_tail(&bo_va->base.vm_status, &vm->evicted);
1873         spin_unlock(&vm->status_lock);
1874
1875         return bo_va;
1876 }
1877
1878
1879 /**
1880  * amdgpu_vm_bo_insert_map - insert a new mapping
1881  *
1882  * @adev: amdgpu_device pointer
1883  * @bo_va: bo_va to store the address
1884  * @mapping: the mapping to insert
1885  *
1886  * Insert a new mapping into all structures.
1887  */
1888 static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
1889                                     struct amdgpu_bo_va *bo_va,
1890                                     struct amdgpu_bo_va_mapping *mapping)
1891 {
1892         struct amdgpu_vm *vm = bo_va->base.vm;
1893         struct amdgpu_bo *bo = bo_va->base.bo;
1894
1895         mapping->bo_va = bo_va;
1896         list_add(&mapping->list, &bo_va->invalids);
1897         amdgpu_vm_it_insert(mapping, &vm->va);
1898
1899         if (mapping->flags & AMDGPU_PTE_PRT)
1900                 amdgpu_vm_prt_get(adev);
1901
1902         if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv) {
1903                 spin_lock(&vm->status_lock);
1904                 if (list_empty(&bo_va->base.vm_status))
1905                         list_add(&bo_va->base.vm_status, &vm->moved);
1906                 spin_unlock(&vm->status_lock);
1907         }
1908         trace_amdgpu_vm_bo_map(bo_va, mapping);
1909 }
1910
1911 /**
1912  * amdgpu_vm_bo_map - map bo inside a vm
1913  *
1914  * @adev: amdgpu_device pointer
1915  * @bo_va: bo_va to store the address
1916  * @saddr: where to map the BO
1917  * @offset: requested offset in the BO
1918  * @flags: attributes of pages (read/write/valid/etc.)
1919  *
1920  * Add a mapping of the BO at the specified addr into the VM.
1921  * Returns 0 for success, error for failure.
1922  *
1923  * Object has to be reserved and unreserved outside!
1924  */
1925 int amdgpu_vm_bo_map(struct amdgpu_device *adev,
1926                      struct amdgpu_bo_va *bo_va,
1927                      uint64_t saddr, uint64_t offset,
1928                      uint64_t size, uint64_t flags)
1929 {
1930         struct amdgpu_bo_va_mapping *mapping, *tmp;
1931         struct amdgpu_bo *bo = bo_va->base.bo;
1932         struct amdgpu_vm *vm = bo_va->base.vm;
1933         uint64_t eaddr;
1934
1935         /* validate the parameters */
1936         if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
1937             size == 0 || size & AMDGPU_GPU_PAGE_MASK)
1938                 return -EINVAL;
1939
1940         /* make sure object fit at this offset */
1941         eaddr = saddr + size - 1;
1942         if (saddr >= eaddr ||
1943             (bo && offset + size > amdgpu_bo_size(bo)))
1944                 return -EINVAL;
1945
1946         saddr /= AMDGPU_GPU_PAGE_SIZE;
1947         eaddr /= AMDGPU_GPU_PAGE_SIZE;
1948
1949         tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
1950         if (tmp) {
1951                 /* bo and tmp overlap, invalid addr */
1952                 dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
1953                         "0x%010Lx-0x%010Lx\n", bo, saddr, eaddr,
1954                         tmp->start, tmp->last + 1);
1955                 return -EINVAL;
1956         }
1957
1958         mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
1959         if (!mapping)
1960                 return -ENOMEM;
1961
1962         mapping->start = saddr;
1963         mapping->last = eaddr;
1964         mapping->offset = offset;
1965         mapping->flags = flags;
1966
1967         amdgpu_vm_bo_insert_map(adev, bo_va, mapping);
1968
1969         return 0;
1970 }
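
/*
 * Illustrative sketch (hypothetical helper, not part of the driver): pairing
 * amdgpu_vm_bo_add() and amdgpu_vm_bo_map() to map a reserved BO at a GPU
 * virtual address. @gpu_addr is a caller-chosen, GPU-page-aligned byte
 * address used only as an example; the flags are limited to the R/W bits
 * used elsewhere in this file.
 */
static int example_map_bo(struct amdgpu_device *adev, struct amdgpu_vm *vm,
                          struct amdgpu_bo *bo, uint64_t gpu_addr)
{
        struct amdgpu_bo_va *bo_va;
        int r;

        /* bo has to be reserved by the caller */
        bo_va = amdgpu_vm_bo_add(adev, vm, bo);
        if (!bo_va)
                return -ENOMEM;

        r = amdgpu_vm_bo_map(adev, bo_va, gpu_addr, 0, amdgpu_bo_size(bo),
                             AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE);
        if (r)
                amdgpu_vm_bo_rmv(adev, bo_va);

        return r;
}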
1971
1972 /**
1973  * amdgpu_vm_bo_replace_map - map bo inside a vm, replacing existing mappings
1974  *
1975  * @adev: amdgpu_device pointer
1976  * @bo_va: bo_va to store the address
1977  * @saddr: where to map the BO
1978  * @offset: requested offset in the BO
1979  * @flags: attributes of pages (read/write/valid/etc.)
1980  *
1981  * Add a mapping of the BO at the specified addr into the VM. Replace existing
1982  * mappings as we do so.
1983  * Returns 0 for success, error for failure.
1984  *
1985  * Object has to be reserved and unreserved outside!
1986  */
1987 int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
1988                              struct amdgpu_bo_va *bo_va,
1989                              uint64_t saddr, uint64_t offset,
1990                              uint64_t size, uint64_t flags)
1991 {
1992         struct amdgpu_bo_va_mapping *mapping;
1993         struct amdgpu_bo *bo = bo_va->base.bo;
1994         uint64_t eaddr;
1995         int r;
1996
1997         /* validate the parameters */
1998         if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
1999             size == 0 || size & AMDGPU_GPU_PAGE_MASK)
2000                 return -EINVAL;
2001
2002         /* make sure object fit at this offset */
2003         eaddr = saddr + size - 1;
2004         if (saddr >= eaddr ||
2005             (bo && offset + size > amdgpu_bo_size(bo)))
2006                 return -EINVAL;
2007
2008         /* Allocate all the needed memory */
2009         mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
2010         if (!mapping)
2011                 return -ENOMEM;
2012
2013         r = amdgpu_vm_bo_clear_mappings(adev, bo_va->base.vm, saddr, size);
2014         if (r) {
2015                 kfree(mapping);
2016                 return r;
2017         }
2018
2019         saddr /= AMDGPU_GPU_PAGE_SIZE;
2020         eaddr /= AMDGPU_GPU_PAGE_SIZE;
2021
2022         mapping->start = saddr;
2023         mapping->last = eaddr;
2024         mapping->offset = offset;
2025         mapping->flags = flags;
2026
2027         amdgpu_vm_bo_insert_map(adev, bo_va, mapping);
2028
2029         return 0;
2030 }
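
/*
 * Illustrative sketch (hypothetical helper, not part of the driver): the
 * difference between the two mapping entry points. amdgpu_vm_bo_map() fails
 * with -EINVAL when the range overlaps an existing mapping, while
 * amdgpu_vm_bo_replace_map() clears whatever overlaps first. Parameters are
 * example values only.
 */
static int example_remap(struct amdgpu_device *adev,
                         struct amdgpu_bo_va *bo_va,
                         uint64_t gpu_addr, uint64_t size)
{
        return amdgpu_vm_bo_replace_map(adev, bo_va, gpu_addr, 0, size,
                                        AMDGPU_PTE_READABLE |
                                        AMDGPU_PTE_WRITEABLE);
}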
2031
2032 /**
2033  * amdgpu_vm_bo_unmap - remove bo mapping from vm
2034  *
2035  * @adev: amdgpu_device pointer
2036  * @bo_va: bo_va to remove the address from
2037  * @saddr: where the BO is mapped
2038  *
2039  * Remove a mapping of the BO at the specified addr from the VM.
2040  * Returns 0 for success, error for failure.
2041  *
2042  * Object has to be reserved and unreserved outside!
2043  */
2044 int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
2045                        struct amdgpu_bo_va *bo_va,
2046                        uint64_t saddr)
2047 {
2048         struct amdgpu_bo_va_mapping *mapping;
2049         struct amdgpu_vm *vm = bo_va->base.vm;
2050         bool valid = true;
2051
2052         saddr /= AMDGPU_GPU_PAGE_SIZE;
2053
2054         list_for_each_entry(mapping, &bo_va->valids, list) {
2055                 if (mapping->start == saddr)
2056                         break;
2057         }
2058
2059         if (&mapping->list == &bo_va->valids) {
2060                 valid = false;
2061
2062                 list_for_each_entry(mapping, &bo_va->invalids, list) {
2063                         if (mapping->start == saddr)
2064                                 break;
2065                 }
2066
2067                 if (&mapping->list == &bo_va->invalids)
2068                         return -ENOENT;
2069         }
2070
2071         list_del(&mapping->list);
2072         amdgpu_vm_it_remove(mapping, &vm->va);
2073         mapping->bo_va = NULL;
2074         trace_amdgpu_vm_bo_unmap(bo_va, mapping);
2075
2076         if (valid)
2077                 list_add(&mapping->list, &vm->freed);
2078         else
2079                 amdgpu_vm_free_mapping(adev, vm, mapping,
2080                                        bo_va->last_pt_update);
2081
2082         return 0;
2083 }
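
/*
 * Illustrative sketch (hypothetical helper, not part of the driver): undoing
 * a mapping such as the one created in the example after amdgpu_vm_bo_map()
 * above. @gpu_addr is the same byte address that was originally mapped; the
 * helper converts it to GPU pages internally and moves the mapping to the
 * VM's freed list (or frees it directly if it was never committed).
 */
static int example_unmap_bo(struct amdgpu_device *adev,
                            struct amdgpu_bo_va *bo_va, uint64_t gpu_addr)
{
        return amdgpu_vm_bo_unmap(adev, bo_va, gpu_addr);
}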
2084
2085 /**
2086  * amdgpu_vm_bo_clear_mappings - remove all mappings in a specific range
2087  *
2088  * @adev: amdgpu_device pointer
2089  * @vm: VM structure to use
2090  * @saddr: start of the range
2091  * @size: size of the range
2092  *
2093  * Remove all mappings in a range, split them as appropriate.
2094  * Returns 0 for success, error for failure.
2095  */
2096 int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
2097                                 struct amdgpu_vm *vm,
2098                                 uint64_t saddr, uint64_t size)
2099 {
2100         struct amdgpu_bo_va_mapping *before, *after, *tmp, *next;
2101         LIST_HEAD(removed);
2102         uint64_t eaddr;
2103
2104         eaddr = saddr + size - 1;
2105         saddr /= AMDGPU_GPU_PAGE_SIZE;
2106         eaddr /= AMDGPU_GPU_PAGE_SIZE;
2107
2108         /* Allocate all the needed memory */
2109         before = kzalloc(sizeof(*before), GFP_KERNEL);
2110         if (!before)
2111                 return -ENOMEM;
2112         INIT_LIST_HEAD(&before->list);
2113
2114         after = kzalloc(sizeof(*after), GFP_KERNEL);
2115         if (!after) {
2116                 kfree(before);
2117                 return -ENOMEM;
2118         }
2119         INIT_LIST_HEAD(&after->list);
2120
2121         /* Now gather all removed mappings */
2122         tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
2123         while (tmp) {
2124                 /* Remember mapping split at the start */
2125                 if (tmp->start < saddr) {
2126                         before->start = tmp->start;
2127                         before->last = saddr - 1;
2128                         before->offset = tmp->offset;
2129                         before->flags = tmp->flags;
2130                         list_add(&before->list, &tmp->list);
2131                 }
2132
2133                 /* Remember mapping split at the end */
2134                 if (tmp->last > eaddr) {
2135                         after->start = eaddr + 1;
2136                         after->last = tmp->last;
2137                         after->offset = tmp->offset;
2138                         after->offset += after->start - tmp->start;
2139                         after->flags = tmp->flags;
2140                         list_add(&after->list, &tmp->list);
2141                 }
2142
2143                 list_del(&tmp->list);
2144                 list_add(&tmp->list, &removed);
2145
2146                 tmp = amdgpu_vm_it_iter_next(tmp, saddr, eaddr);
2147         }
2148
2149         /* And free them up */
2150         list_for_each_entry_safe(tmp, next, &removed, list) {
2151                 amdgpu_vm_it_remove(tmp, &vm->va);
2152                 list_del(&tmp->list);
2153
2154                 if (tmp->start < saddr)
2155                         tmp->start = saddr;
2156                 if (tmp->last > eaddr)
2157                         tmp->last = eaddr;
2158
2159                 tmp->bo_va = NULL;
2160                 list_add(&tmp->list, &vm->freed);
2161                 trace_amdgpu_vm_bo_unmap(NULL, tmp);
2162         }
2163
2164         /* Insert partial mapping before the range */
2165         if (!list_empty(&before->list)) {
2166                 amdgpu_vm_it_insert(before, &vm->va);
2167                 if (before->flags & AMDGPU_PTE_PRT)
2168                         amdgpu_vm_prt_get(adev);
2169         } else {
2170                 kfree(before);
2171         }
2172
2173         /* Insert partial mapping after the range */
2174         if (!list_empty(&after->list)) {
2175                 amdgpu_vm_it_insert(after, &vm->va);
2176                 if (after->flags & AMDGPU_PTE_PRT)
2177                         amdgpu_vm_prt_get(adev);
2178         } else {
2179                 kfree(after);
2180         }
2181
2182         return 0;
2183 }
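
/*
 * Illustrative sketch (hypothetical helper, not part of the driver):
 * punching a hole into an existing mapping. With a BO mapped at GPU byte
 * addresses [0x100000, 0x500000), clearing [0x200000, 0x300000) leaves two
 * remainders, [0x100000, 0x200000) and [0x300000, 0x500000), with their
 * offsets into the BO adjusted as done above. All addresses are
 * hypothetical.
 */
static int example_punch_hole(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
        return amdgpu_vm_bo_clear_mappings(adev, vm, 0x200000, 0x100000);
}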
2184
2185 /**
2186  * amdgpu_vm_bo_lookup_mapping - find mapping by address
2187  *
2188  * @vm: the requested VM
2189  *
2190  * Find a mapping by its address.
2191  */
2192 struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
2193                                                          uint64_t addr)
2194 {
2195         return amdgpu_vm_it_iter_first(&vm->va, addr, addr);
2196 }
2197
2198 /**
2199  * amdgpu_vm_bo_rmv - remove a bo from a specific vm
2200  *
2201  * @adev: amdgpu_device pointer
2202  * @bo_va: requested bo_va
2203  *
2204  * Remove @bo_va->bo from the requested vm.
2205  *
2206  * Object has to be reserved!
2207  */
2208 void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
2209                       struct amdgpu_bo_va *bo_va)
2210 {
2211         struct amdgpu_bo_va_mapping *mapping, *next;
2212         struct amdgpu_vm *vm = bo_va->base.vm;
2213
2214         list_del(&bo_va->base.bo_list);
2215
2216         spin_lock(&vm->status_lock);
2217         list_del(&bo_va->base.vm_status);
2218         spin_unlock(&vm->status_lock);
2219
2220         list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
2221                 list_del(&mapping->list);
2222                 amdgpu_vm_it_remove(mapping, &vm->va);
2223                 mapping->bo_va = NULL;
2224                 trace_amdgpu_vm_bo_unmap(bo_va, mapping);
2225                 list_add(&mapping->list, &vm->freed);
2226         }
2227         list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
2228                 list_del(&mapping->list);
2229                 amdgpu_vm_it_remove(mapping, &vm->va);
2230                 amdgpu_vm_free_mapping(adev, vm, mapping,
2231                                        bo_va->last_pt_update);
2232         }
2233
2234         dma_fence_put(bo_va->last_pt_update);
2235         kfree(bo_va);
2236 }
2237
2238 /**
2239  * amdgpu_vm_bo_invalidate - mark the bo as invalid
2240  *
2241  * @adev: amdgpu_device pointer
2242  * @bo: amdgpu buffer object
2243  * @evicted: is the BO evicted
2244  *
2245  * Mark @bo as invalid.
2246  */
2247 void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
2248                              struct amdgpu_bo *bo, bool evicted)
2249 {
2250         struct amdgpu_vm_bo_base *bo_base;
2251
2252         list_for_each_entry(bo_base, &bo->va, bo_list) {
2253                 struct amdgpu_vm *vm = bo_base->vm;
2254
2255                 bo_base->moved = true;
2256                 if (evicted && bo->tbo.resv == vm->root.base.bo->tbo.resv) {
2257                         spin_lock(&bo_base->vm->status_lock);
2258                         if (bo->tbo.type == ttm_bo_type_kernel)
2259                                 list_move(&bo_base->vm_status, &vm->evicted);
2260                         else
2261                                 list_move_tail(&bo_base->vm_status,
2262                                                &vm->evicted);
2263                         spin_unlock(&bo_base->vm->status_lock);
2264                         continue;
2265                 }
2266
2267                 if (bo->tbo.type == ttm_bo_type_kernel) {
2268                         spin_lock(&bo_base->vm->status_lock);
2269                         if (list_empty(&bo_base->vm_status))
2270                                 list_add(&bo_base->vm_status, &vm->relocated);
2271                         spin_unlock(&bo_base->vm->status_lock);
2272                         continue;
2273                 }
2274
2275                 spin_lock(&bo_base->vm->status_lock);
2276                 if (list_empty(&bo_base->vm_status))
2277                         list_add(&bo_base->vm_status, &vm->moved);
2278                 spin_unlock(&bo_base->vm->status_lock);
2279         }
2280 }
2281
2282 static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size)
2283 {
2284         /* Total bits covered by PD + PTs */
2285         unsigned bits = ilog2(vm_size) + 18;
2286
2287         /* Make sure the PD is 4K in size for up to 8GB of address space.
2288            Above that, split the bits equally between PD and PTs */
2289         if (vm_size <= 8)
2290                 return (bits - 9);
2291         else
2292                 return ((bits + 3) / 2);
2293 }
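
/*
 * Illustrative sketch (hypothetical, not part of the driver): what the
 * helper above returns for two example VM sizes (in GB). Up to 8 GB the PD
 * keeps 9 bits and so stays 4K in size; above that the bits are split
 * roughly evenly between PD and PTs.
 */
static void example_block_size_check(void)
{
        /* 8 GB: ilog2(8) + 18 = 21 bits total, 21 - 9 = 12 bits per PT */
        WARN_ON(amdgpu_vm_get_block_size(8) != 12);
        /* 256 GB: 26 bits total, (26 + 3) / 2 = 14 bits per PT */
        WARN_ON(amdgpu_vm_get_block_size(256) != 14);
}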
2294
2295 /**
2296  * amdgpu_vm_adjust_size - adjust vm size, block size and fragment size
2297  *
2298  * @adev: amdgpu_device pointer
2299  * @vm_size: the default vm size if it is set to auto
2300  */
2301 void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t vm_size,
2302                            uint32_t fragment_size_default, unsigned max_level,
2303                            unsigned max_bits)
2304 {
2305         uint64_t tmp;
2306
2307         /* adjust vm size first */
2308         if (amdgpu_vm_size != -1) {
2309                 unsigned max_size = 1 << (max_bits - 30);
2310
2311                 vm_size = amdgpu_vm_size;
2312                 if (vm_size > max_size) {
2313                         dev_warn(adev->dev, "VM size (%d) too large, max is %u GB\n",
2314                                  amdgpu_vm_size, max_size);
2315                         vm_size = max_size;
2316                 }
2317         }
2318
2319         adev->vm_manager.max_pfn = (uint64_t)vm_size << 18;
2320
2321         tmp = roundup_pow_of_two(adev->vm_manager.max_pfn);
2322         if (amdgpu_vm_block_size != -1)
2323                 tmp >>= amdgpu_vm_block_size - 9;
2324         tmp = DIV_ROUND_UP(fls64(tmp) - 1, 9) - 1;
2325         adev->vm_manager.num_level = min(max_level, (unsigned)tmp);
2326         switch (adev->vm_manager.num_level) {
2327         case 3:
2328                 adev->vm_manager.root_level = AMDGPU_VM_PDB2;
2329                 break;
2330         case 2:
2331                 adev->vm_manager.root_level = AMDGPU_VM_PDB1;
2332                 break;
2333         case 1:
2334                 adev->vm_manager.root_level = AMDGPU_VM_PDB0;
2335                 break;
2336         default:
2337                 dev_err(adev->dev, "VMPT only supports 2~4+1 levels\n");
2338         }
2339         /* block size depends on vm size and hw setup */
2340         if (amdgpu_vm_block_size != -1)
2341                 adev->vm_manager.block_size =
2342                         min((unsigned)amdgpu_vm_block_size, max_bits
2343                             - AMDGPU_GPU_PAGE_SHIFT
2344                             - 9 * adev->vm_manager.num_level);
2345         else if (adev->vm_manager.num_level > 1)
2346                 adev->vm_manager.block_size = 9;
2347         else
2348                 adev->vm_manager.block_size = amdgpu_vm_get_block_size(tmp);
2349
2350         if (amdgpu_vm_fragment_size == -1)
2351                 adev->vm_manager.fragment_size = fragment_size_default;
2352         else
2353                 adev->vm_manager.fragment_size = amdgpu_vm_fragment_size;
2354
2355         DRM_INFO("vm size is %u GB, %u levels, block size is %u-bit, fragment size is %u-bit\n",
2356                  vm_size, adev->vm_manager.num_level + 1,
2357                  adev->vm_manager.block_size,
2358                  adev->vm_manager.fragment_size);
2359 }
2360
2361 /**
2362  * amdgpu_vm_init - initialize a vm instance
2363  *
2364  * @adev: amdgpu_device pointer
2365  * @vm: requested vm
2366  * @vm_context: Indicates whether it is a GFX or Compute context
2367  *
2368  * Init @vm fields.
2369  */
2370 int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
2371                    int vm_context, unsigned int pasid)
2372 {
2373         struct amdgpu_bo_param bp;
2374         const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE,
2375                 AMDGPU_VM_PTE_COUNT(adev) * 8);
2376         unsigned ring_instance;
2377         struct amdgpu_ring *ring;
2378         struct drm_sched_rq *rq;
2379         unsigned long size;
2380         uint64_t flags;
2381         int r, i;
2382
2383         vm->va = RB_ROOT_CACHED;
2384         for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
2385                 vm->reserved_vmid[i] = NULL;
2386         spin_lock_init(&vm->status_lock);
2387         INIT_LIST_HEAD(&vm->evicted);
2388         INIT_LIST_HEAD(&vm->relocated);
2389         INIT_LIST_HEAD(&vm->moved);
2390         INIT_LIST_HEAD(&vm->freed);
2391
2392         /* create scheduler entity for page table updates */
2393
2394         ring_instance = atomic_inc_return(&adev->vm_manager.vm_pte_next_ring);
2395         ring_instance %= adev->vm_manager.vm_pte_num_rings;
2396         ring = adev->vm_manager.vm_pte_rings[ring_instance];
2397         rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
2398         r = drm_sched_entity_init(&ring->sched, &vm->entity,
2399                                   rq, amdgpu_sched_jobs, NULL);
2400         if (r)
2401                 return r;
2402
2403         vm->pte_support_ats = false;
2404
2405         if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE) {
2406                 vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
2407                                                 AMDGPU_VM_USE_CPU_FOR_COMPUTE);
2408
2409                 if (adev->asic_type == CHIP_RAVEN)
2410                         vm->pte_support_ats = true;
2411         } else {
2412                 vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
2413                                                 AMDGPU_VM_USE_CPU_FOR_GFX);
2414         }
2415         DRM_DEBUG_DRIVER("VM update mode is %s\n",
2416                          vm->use_cpu_for_update ? "CPU" : "SDMA");
2417         WARN_ONCE((vm->use_cpu_for_update & !amdgpu_vm_is_large_bar(adev)),
2418                   "CPU update of VM recommended only for large BAR system\n");
2419         vm->last_update = NULL;
2420
2421         flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
2422         if (vm->use_cpu_for_update)
2423                 flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
2424         else
2425                 flags |= AMDGPU_GEM_CREATE_SHADOW;
2426
2427         size = amdgpu_vm_bo_size(adev, adev->vm_manager.root_level);
2428         memset(&bp, 0, sizeof(bp));
2429         bp.size = size;
2430         bp.byte_align = align;
2431         bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
2432         bp.flags = flags;
2433         bp.type = ttm_bo_type_kernel;
2434         bp.resv = NULL;
2435         r = amdgpu_bo_create(adev, &bp, &vm->root.base.bo);
2436         if (r)
2437                 goto error_free_sched_entity;
2438
2439         r = amdgpu_bo_reserve(vm->root.base.bo, true);
2440         if (r)
2441                 goto error_free_root;
2442
2443         r = amdgpu_vm_clear_bo(adev, vm, vm->root.base.bo,
2444                                adev->vm_manager.root_level,
2445                                vm->pte_support_ats);
2446         if (r)
2447                 goto error_unreserve;
2448
2449         vm->root.base.vm = vm;
2450         list_add_tail(&vm->root.base.bo_list, &vm->root.base.bo->va);
2451         list_add_tail(&vm->root.base.vm_status, &vm->evicted);
2452         amdgpu_bo_unreserve(vm->root.base.bo);
2453
2454         if (pasid) {
2455                 unsigned long flags;
2456
2457                 spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
2458                 r = idr_alloc(&adev->vm_manager.pasid_idr, vm, pasid, pasid + 1,
2459                               GFP_ATOMIC);
2460                 spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
2461                 if (r < 0)
2462                         goto error_free_root;
2463
2464                 vm->pasid = pasid;
2465         }
2466
2467         INIT_KFIFO(vm->faults);
2468         vm->fault_credit = 16;
2469
2470         return 0;
2471
2472 error_unreserve:
2473         amdgpu_bo_unreserve(vm->root.base.bo);
2474
2475 error_free_root:
2476         amdgpu_bo_unref(&vm->root.base.bo->shadow);
2477         amdgpu_bo_unref(&vm->root.base.bo);
2478         vm->root.base.bo = NULL;
2479
2480 error_free_sched_entity:
2481         drm_sched_entity_fini(&ring->sched, &vm->entity);
2482
2483         return r;
2484 }
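
/*
 * Illustrative sketch (hypothetical helper, not part of the driver): minimal
 * lifetime of a graphics VM created without a PASID. @vm is assumed to live
 * in a longer-lived, zero-initialized structure (as the driver's file
 * private data does), and AMDGPU_VM_CONTEXT_GFX is assumed to be the
 * non-compute counterpart of the AMDGPU_VM_CONTEXT_COMPUTE value used above.
 */
static int example_vm_lifetime(struct amdgpu_device *adev,
                               struct amdgpu_vm *vm)
{
        int r;

        r = amdgpu_vm_init(adev, vm, AMDGPU_VM_CONTEXT_GFX, 0);
        if (r)
                return r;

        /* ... use the VM for command submission ... */

        amdgpu_vm_fini(adev, vm);
        return 0;
}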
2485
2486 /**
2487  * amdgpu_vm_make_compute - Turn a GFX VM into a compute VM
2488  *
2489  * This only works on GFX VMs that don't have any BOs added and no
2490  * page tables allocated yet.
2491  *
2492  * Changes the following VM parameters:
2493  * - use_cpu_for_update
2494  * - pte_support_ats
2495  * - pasid (old PASID is released, because compute manages its own PASIDs)
2496  *
2497  * Reinitializes the page directory to reflect the changed ATS
2498  * setting. May leave behind an unused shadow BO for the page
2499  * directory when switching from SDMA updates to CPU updates.
2500  *
2501  * Returns 0 for success, -errno for errors.
2502  */
2503 int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
2504 {
2505         bool pte_support_ats = (adev->asic_type == CHIP_RAVEN);
2506         int r;
2507
2508         r = amdgpu_bo_reserve(vm->root.base.bo, true);
2509         if (r)
2510                 return r;
2511
2512         /* Sanity checks */
2513         if (!RB_EMPTY_ROOT(&vm->va.rb_root) || vm->root.entries) {
2514                 r = -EINVAL;
2515                 goto error;
2516         }
2517
2518         /* Check if PD needs to be reinitialized and do it before
2519          * changing any other state, in case it fails.
2520          */
2521         if (pte_support_ats != vm->pte_support_ats) {
2522                 r = amdgpu_vm_clear_bo(adev, vm, vm->root.base.bo,
2523                                adev->vm_manager.root_level,
2524                                pte_support_ats);
2525                 if (r)
2526                         goto error;
2527         }
2528
2529         /* Update VM state */
2530         vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
2531                                     AMDGPU_VM_USE_CPU_FOR_COMPUTE);
2532         vm->pte_support_ats = pte_support_ats;
2533         DRM_DEBUG_DRIVER("VM update mode is %s\n",
2534                          vm->use_cpu_for_update ? "CPU" : "SDMA");
2535         WARN_ONCE((vm->use_cpu_for_update & !amdgpu_vm_is_large_bar(adev)),
2536                   "CPU update of VM recommended only for large BAR system\n");
2537
2538         if (vm->pasid) {
2539                 unsigned long flags;
2540
2541                 spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
2542                 idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
2543                 spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
2544
2545                 vm->pasid = 0;
2546         }
2547
2548 error:
2549         amdgpu_bo_unreserve(vm->root.base.bo);
2550         return r;
2551 }
2552
2553 /**
2554  * amdgpu_vm_free_levels - free PD/PT levels
2555  *
2556  * @adev: amdgpu device structure
2557  * @parent: PD/PT starting level to free
2558  * @level: level of parent structure
2559  *
2560  * Free the page directory or page table level and all sub levels.
2561  */
2562 static void amdgpu_vm_free_levels(struct amdgpu_device *adev,
2563                                   struct amdgpu_vm_pt *parent,
2564                                   unsigned level)
2565 {
2566         unsigned i, num_entries = amdgpu_vm_num_entries(adev, level);
2567
2568         if (parent->base.bo) {
2569                 list_del(&parent->base.bo_list);
2570                 list_del(&parent->base.vm_status);
2571                 amdgpu_bo_unref(&parent->base.bo->shadow);
2572                 amdgpu_bo_unref(&parent->base.bo);
2573         }
2574
2575         if (parent->entries)
2576                 for (i = 0; i < num_entries; i++)
2577                         amdgpu_vm_free_levels(adev, &parent->entries[i],
2578                                               level + 1);
2579
2580         kvfree(parent->entries);
2581 }
2582
2583 /**
2584  * amdgpu_vm_fini - tear down a vm instance
2585  *
2586  * @adev: amdgpu_device pointer
2587  * @vm: requested vm
2588  *
2589  * Tear down @vm.
2590  * Unbind the VM and remove all bos from the vm bo list
2591  */
2592 void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
2593 {
2594         struct amdgpu_bo_va_mapping *mapping, *tmp;
2595         bool prt_fini_needed = !!adev->gmc.gmc_funcs->set_prt;
2596         struct amdgpu_bo *root;
2597         u64 fault;
2598         int i, r;
2599
2600         amdgpu_amdkfd_gpuvm_destroy_cb(adev, vm);
2601
2602         /* Clear pending page faults from IH when the VM is destroyed */
2603         while (kfifo_get(&vm->faults, &fault))
2604                 amdgpu_ih_clear_fault(adev, fault);
2605
2606         if (vm->pasid) {
2607                 unsigned long flags;
2608
2609                 spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
2610                 idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
2611                 spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
2612         }
2613
2614         drm_sched_entity_fini(vm->entity.sched, &vm->entity);
2615
2616         if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
2617                 dev_err(adev->dev, "still active bo inside vm\n");
2618         }
2619         rbtree_postorder_for_each_entry_safe(mapping, tmp,
2620                                              &vm->va.rb_root, rb) {
2621                 list_del(&mapping->list);
2622                 amdgpu_vm_it_remove(mapping, &vm->va);
2623                 kfree(mapping);
2624         }
2625         list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
2626                 if (mapping->flags & AMDGPU_PTE_PRT && prt_fini_needed) {
2627                         amdgpu_vm_prt_fini(adev, vm);
2628                         prt_fini_needed = false;
2629                 }
2630
2631                 list_del(&mapping->list);
2632                 amdgpu_vm_free_mapping(adev, vm, mapping, NULL);
2633         }
2634
2635         root = amdgpu_bo_ref(vm->root.base.bo);
2636         r = amdgpu_bo_reserve(root, true);
2637         if (r) {
2638                 dev_err(adev->dev, "Leaking page tables because BO reservation failed\n");
2639         } else {
2640                 amdgpu_vm_free_levels(adev, &vm->root,
2641                                       adev->vm_manager.root_level);
2642                 amdgpu_bo_unreserve(root);
2643         }
2644         amdgpu_bo_unref(&root);
2645         dma_fence_put(vm->last_update);
2646         for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
2647                 amdgpu_vmid_free_reserved(adev, vm, i);
2648 }
2649
2650 /**
2651  * amdgpu_vm_pasid_fault_credit - Check fault credit for given PASID
2652  *
2653  * @adev: amdgpu_device pointer
2654  * @pasid: PASID to identify the VM
2655  *
2656  * This function is expected to be called in interrupt context. Returns
2657  * true if there was fault credit, false otherwise
2658  */
2659 bool amdgpu_vm_pasid_fault_credit(struct amdgpu_device *adev,
2660                                   unsigned int pasid)
2661 {
2662         struct amdgpu_vm *vm;
2663
2664         spin_lock(&adev->vm_manager.pasid_lock);
2665         vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
2666         if (!vm) {
2667                 /* VM not found, can't track fault credit */
2668                 spin_unlock(&adev->vm_manager.pasid_lock);
2669                 return true;
2670         }
2671
2672         /* No lock needed; only accessed by the IRQ handler */
2673         if (!vm->fault_credit) {
2674                 /* Too many faults in this VM */
2675                 spin_unlock(&adev->vm_manager.pasid_lock);
2676                 return false;
2677         }
2678
2679         vm->fault_credit--;
2680         spin_unlock(&adev->vm_manager.pasid_lock);
2681         return true;
2682 }
2683
2684 /**
2685  * amdgpu_vm_manager_init - init the VM manager
2686  *
2687  * @adev: amdgpu_device pointer
2688  *
2689  * Initialize the VM manager structures
2690  */
2691 void amdgpu_vm_manager_init(struct amdgpu_device *adev)
2692 {
2693         unsigned i;
2694
2695         amdgpu_vmid_mgr_init(adev);
2696
2697         adev->vm_manager.fence_context =
2698                 dma_fence_context_alloc(AMDGPU_MAX_RINGS);
2699         for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
2700                 adev->vm_manager.seqno[i] = 0;
2701
2702         atomic_set(&adev->vm_manager.vm_pte_next_ring, 0);
2703         spin_lock_init(&adev->vm_manager.prt_lock);
2704         atomic_set(&adev->vm_manager.num_prt_users, 0);
2705
2706         /* Unless overridden by the user, compute VM page tables are updated
2707          * by the CPU only on large BAR systems
2708          */
2709 #ifdef CONFIG_X86_64
2710         if (amdgpu_vm_update_mode == -1) {
2711                 if (amdgpu_vm_is_large_bar(adev))
2712                         adev->vm_manager.vm_update_mode =
2713                                 AMDGPU_VM_USE_CPU_FOR_COMPUTE;
2714                 else
2715                         adev->vm_manager.vm_update_mode = 0;
2716         } else
2717                 adev->vm_manager.vm_update_mode = amdgpu_vm_update_mode;
2718 #else
2719         adev->vm_manager.vm_update_mode = 0;
2720 #endif
2721
2722         idr_init(&adev->vm_manager.pasid_idr);
2723         spin_lock_init(&adev->vm_manager.pasid_lock);
2724 }
2725
2726 /**
2727  * amdgpu_vm_manager_fini - cleanup VM manager
2728  *
2729  * @adev: amdgpu_device pointer
2730  *
2731  * Cleanup the VM manager and free resources.
2732  */
2733 void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
2734 {
2735         WARN_ON(!idr_is_empty(&adev->vm_manager.pasid_idr));
2736         idr_destroy(&adev->vm_manager.pasid_idr);
2737
2738         amdgpu_vmid_mgr_fini(adev);
2739 }
2740
2741 int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
2742 {
2743         union drm_amdgpu_vm *args = data;
2744         struct amdgpu_device *adev = dev->dev_private;
2745         struct amdgpu_fpriv *fpriv = filp->driver_priv;
2746         int r;
2747
2748         switch (args->in.op) {
2749         case AMDGPU_VM_OP_RESERVE_VMID:
2750                 /* currently, we only need to reserve a vmid from the gfxhub */
2751                 r = amdgpu_vmid_alloc_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB);
2752                 if (r)
2753                         return r;
2754                 break;
2755         case AMDGPU_VM_OP_UNRESERVE_VMID:
2756                 amdgpu_vmid_free_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB);
2757                 break;
2758         default:
2759                 return -EINVAL;
2760         }
2761
2762         return 0;
2763 }