drm/amdgpu: track evicted page tables v2
1 /*
2  * Copyright 2008 Advanced Micro Devices, Inc.
3  * Copyright 2008 Red Hat Inc.
4  * Copyright 2009 Jerome Glisse.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors: Dave Airlie
25  *          Alex Deucher
26  *          Jerome Glisse
27  */
28 #include <linux/dma-fence-array.h>
29 #include <linux/interval_tree_generic.h>
30 #include <drm/drmP.h>
31 #include <drm/amdgpu_drm.h>
32 #include "amdgpu.h"
33 #include "amdgpu_trace.h"
34
35 /*
36  * GPUVM
37  * GPUVM is similar to the legacy gart on older asics, however
38  * rather than there being a single global gart table
39  * for the entire GPU, there are multiple VM page tables active
40  * at any given time.  The VM page tables can contain a mix of
41  * vram pages and system memory pages, and system memory pages
42  * can be mapped as snooped (cached system pages) or unsnooped
43  * (uncached system pages).
44  * Each VM has an ID associated with it and there is a page table
45  * associated with each VMID.  When executing a command buffer,
46  * the kernel tells the ring what VMID to use for that command
47  * buffer.  VMIDs are allocated dynamically as commands are submitted.
48  * The userspace drivers maintain their own address space and the kernel
49  * sets up their page tables accordingly when they submit their
50  * command buffers and a VMID is assigned.
51  * Cayman/Trinity support up to 8 active VMs at any given time;
52  * SI supports 16.
53  */
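/*
 * A rough, illustrative sketch of how the pieces in this file fit together
 * (names refer to functions defined below):
 *
 *   amdgpu_vm_alloc_pts()            allocate the PD/PT BOs covering a VA range
 *   amdgpu_vm_update_directories()   write the PDEs so they point at the PTs
 *   amdgpu_vm_bo_update_mapping()    fill in the PTEs for an actual mapping
 *   amdgpu_vm_grab_id()              pick or reuse a VMID when a job is submitted
 *   amdgpu_vm_flush()                emit the VM flush / PD base switch on the ring
 */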
54
55 #define START(node) ((node)->start)
56 #define LAST(node) ((node)->last)
57
58 INTERVAL_TREE_DEFINE(struct amdgpu_bo_va_mapping, rb, uint64_t, __subtree_last,
59                      START, LAST, static, amdgpu_vm_it)
60
61 #undef START
62 #undef LAST
63
64 /* Local structure. Encapsulate some VM table update parameters to reduce
65  * the number of function parameters
66  */
67 struct amdgpu_pte_update_params {
68         /* amdgpu device we do this update for */
69         struct amdgpu_device *adev;
70         /* optional amdgpu_vm we do this update for */
71         struct amdgpu_vm *vm;
72         /* address where to copy page table entries from */
73         uint64_t src;
74         /* indirect buffer to fill with commands */
75         struct amdgpu_ib *ib;
76         /* Function which actually does the update */
77         void (*func)(struct amdgpu_pte_update_params *params, uint64_t pe,
78                      uint64_t addr, unsigned count, uint32_t incr,
79                      uint64_t flags);
80         /* The next two are used during VM update by CPU
81          *  DMA addresses to use for mapping
82          *  Kernel pointer of PD/PT BO that needs to be updated
83          */
84         dma_addr_t *pages_addr;
85         void *kptr;
86 };
87
88 /* Helper to disable partial resident texture feature from a fence callback */
89 struct amdgpu_prt_cb {
90         struct amdgpu_device *adev;
91         struct dma_fence_cb cb;
92 };
93
94 /**
95  * amdgpu_vm_num_entries - return the number of entries in a PD/PT
96  *
97  * @adev: amdgpu_device pointer
98  *
99  * Calculate the number of entries in a page directory or page table.
100  */
101 static unsigned amdgpu_vm_num_entries(struct amdgpu_device *adev,
102                                       unsigned level)
103 {
104         if (level == 0)
105                 /* For the root directory */
106                 return adev->vm_manager.max_pfn >>
107                         (adev->vm_manager.block_size *
108                          adev->vm_manager.num_level);
109         else if (level == adev->vm_manager.num_level)
110                 /* For the page tables on the leaves */
111                 return AMDGPU_VM_PTE_COUNT(adev);
112         else
113                 /* Everything in between */
114                 return 1 << adev->vm_manager.block_size;
115 }
116
117 /**
118  * amdgpu_vm_bo_size - returns the size of the BOs in bytes
119  *
120  * @adev: amdgpu_device pointer
121  *
122  * Calculate the size of the BO for a page directory or page table in bytes.
123  */
124 static unsigned amdgpu_vm_bo_size(struct amdgpu_device *adev, unsigned level)
125 {
126         return AMDGPU_GPU_PAGE_ALIGN(amdgpu_vm_num_entries(adev, level) * 8);
127 }
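/*
 * Illustrative numbers only (assuming block_size == 9, num_level == 3 and a
 * 48-bit address space, i.e. max_pfn == 1ULL << 36; the real values come from
 * adev->vm_manager):
 *
 *   root PD:             max_pfn >> (9 * 3)        = 512 entries
 *   intermediate level:  1 << 9                    = 512 entries
 *   leaf page table:     AMDGPU_VM_PTE_COUNT(adev) = 512 entries
 *
 * With 8 bytes per entry each PD/PT BO is 512 * 8 = 4096 bytes, i.e. exactly
 * one GPU page after AMDGPU_GPU_PAGE_ALIGN().
 */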
128
129 /**
130  * amdgpu_vm_get_pd_bo - add the VM PD to a validation list
131  *
132  * @vm: vm providing the BOs
133  * @validated: head of validation list
134  * @entry: entry to add
135  *
136  * Add the page directory to the list of BOs to
137  * validate for command submission.
138  */
139 void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
140                          struct list_head *validated,
141                          struct amdgpu_bo_list_entry *entry)
142 {
143         entry->robj = vm->root.base.bo;
144         entry->priority = 0;
145         entry->tv.bo = &entry->robj->tbo;
146         entry->tv.shared = true;
147         entry->user_pages = NULL;
148         list_add(&entry->tv.head, validated);
149 }
150
151 /**
152  * amdgpu_vm_validate_pt_bos - validate the page table BOs
153  *
154  * @adev: amdgpu device pointer
155  * @vm: vm providing the BOs
156  * @validate: callback to do the validation
157  * @param: parameter for the validation callback
158  *
159  * Validate the page table BOs on command submission if necessary.
160  */
161 int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
162                               int (*validate)(void *p, struct amdgpu_bo *bo),
163                               void *param)
164 {
165         struct ttm_bo_global *glob = adev->mman.bdev.glob;
166         int r;
167
168         spin_lock(&vm->status_lock);
169         while (!list_empty(&vm->evicted)) {
170                 struct amdgpu_vm_bo_base *bo_base;
171                 struct amdgpu_bo *bo;
172
173                 bo_base = list_first_entry(&vm->evicted,
174                                            struct amdgpu_vm_bo_base,
175                                            vm_status);
176                 spin_unlock(&vm->status_lock);
177
178                 bo = bo_base->bo;
179                 BUG_ON(!bo);
180                 if (bo->parent) {
181                         r = validate(param, bo);
182                         if (r)
183                                 return r;
184
185                         spin_lock(&glob->lru_lock);
186                         ttm_bo_move_to_lru_tail(&bo->tbo);
187                         if (bo->shadow)
188                                 ttm_bo_move_to_lru_tail(&bo->shadow->tbo);
189                         spin_unlock(&glob->lru_lock);
190                 }
191
192                 if (vm->use_cpu_for_update) {
193                         r = amdgpu_bo_kmap(bo, NULL);
194                         if (r)
195                                 return r;
196                 }
197
198                 spin_lock(&vm->status_lock);
199                 list_del_init(&bo_base->vm_status);
200         }
201         spin_unlock(&vm->status_lock);
202
203         return 0;
204 }
205
206 /**
207  * amdgpu_vm_ready - check VM is ready for updates
208  *
209  * @vm: VM to check
210  *
211  * Check if all VM PDs/PTs are ready for updates
212  */
213 bool amdgpu_vm_ready(struct amdgpu_vm *vm)
214 {
215         bool ready;
216
217         spin_lock(&vm->status_lock);
218         ready = list_empty(&vm->evicted);
219         spin_unlock(&vm->status_lock);
220
221         return ready;
222 }
223
224 /**
225  * amdgpu_vm_alloc_levels - allocate the PD/PT levels
226  *
227  * @adev: amdgpu_device pointer
228  * @vm: requested vm
229  * @saddr: start of the address range
230  * @eaddr: end of the address range
231  *
232  * Make sure the page directories and page tables are allocated
233  */
234 static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
235                                   struct amdgpu_vm *vm,
236                                   struct amdgpu_vm_pt *parent,
237                                   uint64_t saddr, uint64_t eaddr,
238                                   unsigned level)
239 {
240         unsigned shift = (adev->vm_manager.num_level - level) *
241                 adev->vm_manager.block_size;
242         unsigned pt_idx, from, to;
243         int r;
244         u64 flags;
245         uint64_t init_value = 0;
246
247         if (!parent->entries) {
248                 unsigned num_entries = amdgpu_vm_num_entries(adev, level);
249
250                 parent->entries = kvmalloc_array(num_entries,
251                                                    sizeof(struct amdgpu_vm_pt),
252                                                    GFP_KERNEL | __GFP_ZERO);
253                 if (!parent->entries)
254                         return -ENOMEM;
255                 memset(parent->entries, 0, num_entries * sizeof(struct amdgpu_vm_pt));
256         }
257
258         from = saddr >> shift;
259         to = eaddr >> shift;
260         if (from >= amdgpu_vm_num_entries(adev, level) ||
261             to >= amdgpu_vm_num_entries(adev, level))
262                 return -EINVAL;
263
264         if (to > parent->last_entry_used)
265                 parent->last_entry_used = to;
266
267         ++level;
268         saddr = saddr & ((1 << shift) - 1);
269         eaddr = eaddr & ((1 << shift) - 1);
270
271         flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
272                         AMDGPU_GEM_CREATE_VRAM_CLEARED;
273         if (vm->use_cpu_for_update)
274                 flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
275         else
276                 flags |= (AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
277                                 AMDGPU_GEM_CREATE_SHADOW);
278
279         if (vm->pte_support_ats) {
280                 init_value = AMDGPU_PTE_SYSTEM;
281                 if (level != adev->vm_manager.num_level - 1)
282                         init_value |= AMDGPU_PDE_PTE;
283         }
284
285         /* walk over the address space and allocate the page tables */
286         for (pt_idx = from; pt_idx <= to; ++pt_idx) {
287                 struct reservation_object *resv = vm->root.base.bo->tbo.resv;
288                 struct amdgpu_vm_pt *entry = &parent->entries[pt_idx];
289                 struct amdgpu_bo *pt;
290
291                 if (!entry->base.bo) {
292                         r = amdgpu_bo_create(adev,
293                                              amdgpu_vm_bo_size(adev, level),
294                                              AMDGPU_GPU_PAGE_SIZE, true,
295                                              AMDGPU_GEM_DOMAIN_VRAM,
296                                              flags,
297                                              NULL, resv, init_value, &pt);
298                         if (r)
299                                 return r;
300
301                         if (vm->use_cpu_for_update) {
302                                 r = amdgpu_bo_kmap(pt, NULL);
303                                 if (r) {
304                                         amdgpu_bo_unref(&pt);
305                                         return r;
306                                 }
307                         }
308
309                         /* Keep a reference to the root directory to avoid
310                          * freeing them up in the wrong order.
311                          */
312                         pt->parent = amdgpu_bo_ref(vm->root.base.bo);
313
314                         entry->base.vm = vm;
315                         entry->base.bo = pt;
316                         list_add_tail(&entry->base.bo_list, &pt->va);
317                         INIT_LIST_HEAD(&entry->base.vm_status);
318                         entry->addr = 0;
319                 }
320
321                 if (level < adev->vm_manager.num_level) {
322                         uint64_t sub_saddr = (pt_idx == from) ? saddr : 0;
323                         uint64_t sub_eaddr = (pt_idx == to) ? eaddr :
324                                 ((1 << shift) - 1);
325                         r = amdgpu_vm_alloc_levels(adev, vm, entry, sub_saddr,
326                                                    sub_eaddr, level);
327                         if (r)
328                                 return r;
329                 }
330         }
331
332         return 0;
333 }
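/*
 * Worked example (illustrative values: block_size == 9, num_level == 3):
 * for the call at level L the shift is (3 - L) * 9, so a page frame number
 * is decomposed as
 *
 *   pfn bits [35:27] -> index into the root PD     (level 0, shift 27)
 *   pfn bits [26:18] -> index into the level 1 PD  (shift 18)
 *   pfn bits [17:9]  -> index into the level 2 PD  (shift 9)
 *   pfn bits [8:0]   -> PTE index inside the leaf PT
 *
 * After handling one level, saddr/eaddr are masked with (1 << shift) - 1 so
 * that only the offset within the selected entry is passed down to the
 * recursion (see sub_saddr/sub_eaddr).
 */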
334
335 /**
336  * amdgpu_vm_alloc_pts - Allocate page tables.
337  *
338  * @adev: amdgpu_device pointer
339  * @vm: VM to allocate page tables for
340  * @saddr: Start address which needs to be allocated
341  * @size: Size from start address we need.
342  *
343  * Make sure the page tables are allocated.
344  */
345 int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
346                         struct amdgpu_vm *vm,
347                         uint64_t saddr, uint64_t size)
348 {
349         uint64_t last_pfn;
350         uint64_t eaddr;
351
352         /* validate the parameters */
353         if (saddr & AMDGPU_GPU_PAGE_MASK || size & AMDGPU_GPU_PAGE_MASK)
354                 return -EINVAL;
355
356         eaddr = saddr + size - 1;
357         last_pfn = eaddr / AMDGPU_GPU_PAGE_SIZE;
358         if (last_pfn >= adev->vm_manager.max_pfn) {
359                 dev_err(adev->dev, "va above limit (0x%08llX >= 0x%08llX)\n",
360                         last_pfn, adev->vm_manager.max_pfn);
361                 return -EINVAL;
362         }
363
364         saddr /= AMDGPU_GPU_PAGE_SIZE;
365         eaddr /= AMDGPU_GPU_PAGE_SIZE;
366
367         return amdgpu_vm_alloc_levels(adev, vm, &vm->root, saddr, eaddr, 0);
368 }
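/*
 * For example, mapping 2 MiB at GPU VA 0x100000: both values are multiples of
 * AMDGPU_GPU_PAGE_SIZE (4 KiB) so the alignment check passes, eaddr becomes
 * 0x2fffff, and amdgpu_vm_alloc_levels() is called for the page frame range
 * 0x100..0x2ff (assuming that range is below max_pfn).
 */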
369
370 /**
371  * amdgpu_vm_had_gpu_reset - check if reset occurred since last use
372  *
373  * @adev: amdgpu_device pointer
374  * @id: VMID structure
375  *
376  * Check if GPU reset occurred since last use of the VMID.
377  */
378 static bool amdgpu_vm_had_gpu_reset(struct amdgpu_device *adev,
379                                     struct amdgpu_vm_id *id)
380 {
381         return id->current_gpu_reset_count !=
382                 atomic_read(&adev->gpu_reset_counter);
383 }
384
385 static bool amdgpu_vm_reserved_vmid_ready(struct amdgpu_vm *vm, unsigned vmhub)
386 {
387         return !!vm->reserved_vmid[vmhub];
388 }
389
390 /* id_mgr->lock must be held */
391 static int amdgpu_vm_grab_reserved_vmid_locked(struct amdgpu_vm *vm,
392                                                struct amdgpu_ring *ring,
393                                                struct amdgpu_sync *sync,
394                                                struct dma_fence *fence,
395                                                struct amdgpu_job *job)
396 {
397         struct amdgpu_device *adev = ring->adev;
398         unsigned vmhub = ring->funcs->vmhub;
399         uint64_t fence_context = adev->fence_context + ring->idx;
400         struct amdgpu_vm_id *id = vm->reserved_vmid[vmhub];
401         struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub];
402         struct dma_fence *updates = sync->last_vm_update;
403         int r = 0;
404         struct dma_fence *flushed, *tmp;
405         bool needs_flush = vm->use_cpu_for_update;
406
407         flushed  = id->flushed_updates;
408         if ((amdgpu_vm_had_gpu_reset(adev, id)) ||
409             (atomic64_read(&id->owner) != vm->client_id) ||
410             (job->vm_pd_addr != id->pd_gpu_addr) ||
411             (updates && (!flushed || updates->context != flushed->context ||
412                         dma_fence_is_later(updates, flushed))) ||
413             (!id->last_flush || (id->last_flush->context != fence_context &&
414                                  !dma_fence_is_signaled(id->last_flush)))) {
415                 needs_flush = true;
416                 /* to prevent one context from being starved by another */
417                 id->pd_gpu_addr = 0;
418                 tmp = amdgpu_sync_peek_fence(&id->active, ring);
419                 if (tmp) {
420                         r = amdgpu_sync_fence(adev, sync, tmp);
421                         return r;
422                 }
423         }
424
425         /* Good we can use this VMID. Remember this submission as
426          * user of the VMID.
427          */
428         r = amdgpu_sync_fence(ring->adev, &id->active, fence);
429         if (r)
430                 goto out;
431
432         if (updates && (!flushed || updates->context != flushed->context ||
433                         dma_fence_is_later(updates, flushed))) {
434                 dma_fence_put(id->flushed_updates);
435                 id->flushed_updates = dma_fence_get(updates);
436         }
437         id->pd_gpu_addr = job->vm_pd_addr;
438         atomic64_set(&id->owner, vm->client_id);
439         job->vm_needs_flush = needs_flush;
440         if (needs_flush) {
441                 dma_fence_put(id->last_flush);
442                 id->last_flush = NULL;
443         }
444         job->vm_id = id - id_mgr->ids;
445         trace_amdgpu_vm_grab_id(vm, ring, job);
446 out:
447         return r;
448 }
449
450 /**
451  * amdgpu_vm_grab_id - allocate the next free VMID
452  *
453  * @vm: vm to allocate id for
454  * @ring: ring we want to submit job to
455  * @sync: sync object where we add dependencies
456  * @fence: fence protecting ID from reuse
457  *
458  * Allocate an id for the vm, adding fences to the sync obj as necessary.
459  */
460 int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
461                       struct amdgpu_sync *sync, struct dma_fence *fence,
462                       struct amdgpu_job *job)
463 {
464         struct amdgpu_device *adev = ring->adev;
465         unsigned vmhub = ring->funcs->vmhub;
466         struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub];
467         uint64_t fence_context = adev->fence_context + ring->idx;
468         struct dma_fence *updates = sync->last_vm_update;
469         struct amdgpu_vm_id *id, *idle;
470         struct dma_fence **fences;
471         unsigned i;
472         int r = 0;
473
474         mutex_lock(&id_mgr->lock);
475         if (amdgpu_vm_reserved_vmid_ready(vm, vmhub)) {
476                 r = amdgpu_vm_grab_reserved_vmid_locked(vm, ring, sync, fence, job);
477                 mutex_unlock(&id_mgr->lock);
478                 return r;
479         }
480         fences = kmalloc_array(id_mgr->num_ids, sizeof(void *), GFP_KERNEL);
481         if (!fences) {
482                 mutex_unlock(&id_mgr->lock);
483                 return -ENOMEM;
484         }
485         /* Check if we have an idle VMID */
486         i = 0;
487         list_for_each_entry(idle, &id_mgr->ids_lru, list) {
488                 fences[i] = amdgpu_sync_peek_fence(&idle->active, ring);
489                 if (!fences[i])
490                         break;
491                 ++i;
492         }
493
494         /* If we can't find an idle VMID to use, wait till one becomes available */
495         if (&idle->list == &id_mgr->ids_lru) {
496                 u64 fence_context = adev->vm_manager.fence_context + ring->idx;
497                 unsigned seqno = ++adev->vm_manager.seqno[ring->idx];
498                 struct dma_fence_array *array;
499                 unsigned j;
500
501                 for (j = 0; j < i; ++j)
502                         dma_fence_get(fences[j]);
503
504                 array = dma_fence_array_create(i, fences, fence_context,
505                                            seqno, true);
506                 if (!array) {
507                         for (j = 0; j < i; ++j)
508                                 dma_fence_put(fences[j]);
509                         kfree(fences);
510                         r = -ENOMEM;
511                         goto error;
512                 }
513
514
515                 r = amdgpu_sync_fence(ring->adev, sync, &array->base);
516                 dma_fence_put(&array->base);
517                 if (r)
518                         goto error;
519
520                 mutex_unlock(&id_mgr->lock);
521                 return 0;
522
523         }
524         kfree(fences);
525
526         job->vm_needs_flush = vm->use_cpu_for_update;
527         /* Check if we can use a VMID already assigned to this VM */
528         list_for_each_entry_reverse(id, &id_mgr->ids_lru, list) {
529                 struct dma_fence *flushed;
530                 bool needs_flush = vm->use_cpu_for_update;
531
532                 /* Check all the prerequisites to using this VMID */
533                 if (amdgpu_vm_had_gpu_reset(adev, id))
534                         continue;
535
536                 if (atomic64_read(&id->owner) != vm->client_id)
537                         continue;
538
539                 if (job->vm_pd_addr != id->pd_gpu_addr)
540                         continue;
541
542                 if (!id->last_flush ||
543                     (id->last_flush->context != fence_context &&
544                      !dma_fence_is_signaled(id->last_flush)))
545                         needs_flush = true;
546
547                 flushed  = id->flushed_updates;
548                 if (updates && (!flushed || dma_fence_is_later(updates, flushed)))
549                         needs_flush = true;
550
551                 /* Concurrent flushes are only possible starting with Vega10 */
552                 if (adev->asic_type < CHIP_VEGA10 && needs_flush)
553                         continue;
554
555                 /* Good we can use this VMID. Remember this submission as
556                  * user of the VMID.
557                  */
558                 r = amdgpu_sync_fence(ring->adev, &id->active, fence);
559                 if (r)
560                         goto error;
561
562                 if (updates && (!flushed || dma_fence_is_later(updates, flushed))) {
563                         dma_fence_put(id->flushed_updates);
564                         id->flushed_updates = dma_fence_get(updates);
565                 }
566
567                 if (needs_flush)
568                         goto needs_flush;
569                 else
570                         goto no_flush_needed;
571
572         }
573
574         /* Still no ID to use? Then use the idle one found earlier */
575         id = idle;
576
577         /* Remember this submission as user of the VMID */
578         r = amdgpu_sync_fence(ring->adev, &id->active, fence);
579         if (r)
580                 goto error;
581
582         id->pd_gpu_addr = job->vm_pd_addr;
583         dma_fence_put(id->flushed_updates);
584         id->flushed_updates = dma_fence_get(updates);
585         atomic64_set(&id->owner, vm->client_id);
586
587 needs_flush:
588         job->vm_needs_flush = true;
589         dma_fence_put(id->last_flush);
590         id->last_flush = NULL;
591
592 no_flush_needed:
593         list_move_tail(&id->list, &id_mgr->ids_lru);
594
595         job->vm_id = id - id_mgr->ids;
596         trace_amdgpu_vm_grab_id(vm, ring, job);
597
598 error:
599         mutex_unlock(&id_mgr->lock);
600         return r;
601 }
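/*
 * In short, an already assigned VMID can be reused without a flush only when
 * no GPU reset happened since it was last used, it is still owned by this
 * client, the page directory address is unchanged, id->flushed_updates already
 * covers sync->last_vm_update and id->last_flush is signaled (or comes from
 * the same fence context).  Otherwise needs_flush is set, which on ASICs
 * before Vega10 means the VMID cannot be reused at all.
 */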
602
603 static void amdgpu_vm_free_reserved_vmid(struct amdgpu_device *adev,
604                                           struct amdgpu_vm *vm,
605                                           unsigned vmhub)
606 {
607         struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub];
608
609         mutex_lock(&id_mgr->lock);
610         if (vm->reserved_vmid[vmhub]) {
611                 list_add(&vm->reserved_vmid[vmhub]->list,
612                         &id_mgr->ids_lru);
613                 vm->reserved_vmid[vmhub] = NULL;
614                 atomic_dec(&id_mgr->reserved_vmid_num);
615         }
616         mutex_unlock(&id_mgr->lock);
617 }
618
619 static int amdgpu_vm_alloc_reserved_vmid(struct amdgpu_device *adev,
620                                          struct amdgpu_vm *vm,
621                                          unsigned vmhub)
622 {
623         struct amdgpu_vm_id_manager *id_mgr;
624         struct amdgpu_vm_id *idle;
625         int r = 0;
626
627         id_mgr = &adev->vm_manager.id_mgr[vmhub];
628         mutex_lock(&id_mgr->lock);
629         if (vm->reserved_vmid[vmhub])
630                 goto unlock;
631         if (atomic_inc_return(&id_mgr->reserved_vmid_num) >
632             AMDGPU_VM_MAX_RESERVED_VMID) {
633                 DRM_ERROR("Over limitation of reserved vmid\n");
634                 atomic_dec(&id_mgr->reserved_vmid_num);
635                 r = -EINVAL;
636                 goto unlock;
637         }
638         /* Select the first idle VMID from the LRU list */
639         idle = list_first_entry(&id_mgr->ids_lru, struct amdgpu_vm_id, list);
640         list_del_init(&idle->list);
641         vm->reserved_vmid[vmhub] = idle;
642         mutex_unlock(&id_mgr->lock);
643
644         return 0;
645 unlock:
646         mutex_unlock(&id_mgr->lock);
647         return r;
648 }
649
650 /**
651  * amdgpu_vm_check_compute_bug - check whether asic has compute vm bug
652  *
653  * @adev: amdgpu_device pointer
654  */
655 void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev)
656 {
657         const struct amdgpu_ip_block *ip_block;
658         bool has_compute_vm_bug;
659         struct amdgpu_ring *ring;
660         int i;
661
662         has_compute_vm_bug = false;
663
664         ip_block = amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
665         if (ip_block) {
666                 /* Compute has a VM bug for GFX version < 7.
667                  * Compute has a VM bug for GFX 8 MEC firmware version < 673. */
668                 if (ip_block->version->major <= 7)
669                         has_compute_vm_bug = true;
670                 else if (ip_block->version->major == 8)
671                         if (adev->gfx.mec_fw_version < 673)
672                                 has_compute_vm_bug = true;
673         }
674
675         for (i = 0; i < adev->num_rings; i++) {
676                 ring = adev->rings[i];
677                 if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
678                         /* only compute rings */
679                         ring->has_compute_vm_bug = has_compute_vm_bug;
680                 else
681                         ring->has_compute_vm_bug = false;
682         }
683 }
684
685 bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
686                                   struct amdgpu_job *job)
687 {
688         struct amdgpu_device *adev = ring->adev;
689         unsigned vmhub = ring->funcs->vmhub;
690         struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub];
691         struct amdgpu_vm_id *id;
692         bool gds_switch_needed;
693         bool vm_flush_needed = job->vm_needs_flush || ring->has_compute_vm_bug;
694
695         if (job->vm_id == 0)
696                 return false;
697         id = &id_mgr->ids[job->vm_id];
698         gds_switch_needed = ring->funcs->emit_gds_switch && (
699                 id->gds_base != job->gds_base ||
700                 id->gds_size != job->gds_size ||
701                 id->gws_base != job->gws_base ||
702                 id->gws_size != job->gws_size ||
703                 id->oa_base != job->oa_base ||
704                 id->oa_size != job->oa_size);
705
706         if (amdgpu_vm_had_gpu_reset(adev, id))
707                 return true;
708
709         return vm_flush_needed || gds_switch_needed;
710 }
711
712 static bool amdgpu_vm_is_large_bar(struct amdgpu_device *adev)
713 {
714         return (adev->mc.real_vram_size == adev->mc.visible_vram_size);
715 }
716
717 /**
718  * amdgpu_vm_flush - hardware flush the vm
719  *
720  * @ring: ring to use for flush
721  * @job: job containing the VM information
722  * @need_pipe_sync: whether a pipeline sync is needed
723  *
724  * Emit a VM flush when it is necessary.
725  */
726 int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync)
727 {
728         struct amdgpu_device *adev = ring->adev;
729         unsigned vmhub = ring->funcs->vmhub;
730         struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub];
731         struct amdgpu_vm_id *id = &id_mgr->ids[job->vm_id];
732         bool gds_switch_needed = ring->funcs->emit_gds_switch && (
733                 id->gds_base != job->gds_base ||
734                 id->gds_size != job->gds_size ||
735                 id->gws_base != job->gws_base ||
736                 id->gws_size != job->gws_size ||
737                 id->oa_base != job->oa_base ||
738                 id->oa_size != job->oa_size);
739         bool vm_flush_needed = job->vm_needs_flush;
740         unsigned patch_offset = 0;
741         int r;
742
743         if (amdgpu_vm_had_gpu_reset(adev, id)) {
744                 gds_switch_needed = true;
745                 vm_flush_needed = true;
746         }
747
748         if (!vm_flush_needed && !gds_switch_needed && !need_pipe_sync)
749                 return 0;
750
751         if (ring->funcs->init_cond_exec)
752                 patch_offset = amdgpu_ring_init_cond_exec(ring);
753
754         if (need_pipe_sync)
755                 amdgpu_ring_emit_pipeline_sync(ring);
756
757         if (ring->funcs->emit_vm_flush && vm_flush_needed) {
758                 struct dma_fence *fence;
759
760                 trace_amdgpu_vm_flush(ring, job->vm_id, job->vm_pd_addr);
761                 amdgpu_ring_emit_vm_flush(ring, job->vm_id, job->vm_pd_addr);
762
763                 r = amdgpu_fence_emit(ring, &fence);
764                 if (r)
765                         return r;
766
767                 mutex_lock(&id_mgr->lock);
768                 dma_fence_put(id->last_flush);
769                 id->last_flush = fence;
770                 id->current_gpu_reset_count = atomic_read(&adev->gpu_reset_counter);
771                 mutex_unlock(&id_mgr->lock);
772         }
773
774         if (ring->funcs->emit_gds_switch && gds_switch_needed) {
775                 id->gds_base = job->gds_base;
776                 id->gds_size = job->gds_size;
777                 id->gws_base = job->gws_base;
778                 id->gws_size = job->gws_size;
779                 id->oa_base = job->oa_base;
780                 id->oa_size = job->oa_size;
781                 amdgpu_ring_emit_gds_switch(ring, job->vm_id, job->gds_base,
782                                             job->gds_size, job->gws_base,
783                                             job->gws_size, job->oa_base,
784                                             job->oa_size);
785         }
786
787         if (ring->funcs->patch_cond_exec)
788                 amdgpu_ring_patch_cond_exec(ring, patch_offset);
789
790         /* the double SWITCH_BUFFER here *cannot* be skipped by COND_EXEC */
791         if (ring->funcs->emit_switch_buffer) {
792                 amdgpu_ring_emit_switch_buffer(ring);
793                 amdgpu_ring_emit_switch_buffer(ring);
794         }
795         return 0;
796 }
797
798 /**
799  * amdgpu_vm_reset_id - reset VMID to zero
800  *
801  * @adev: amdgpu device structure
802  * @vmid: vmid number to reset
803  *
804  * Reset saved GDS, GWS and OA to force a switch on next flush.
805  */
806 void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vmhub,
807                         unsigned vmid)
808 {
809         struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub];
810         struct amdgpu_vm_id *id = &id_mgr->ids[vmid];
811
812         atomic64_set(&id->owner, 0);
813         id->gds_base = 0;
814         id->gds_size = 0;
815         id->gws_base = 0;
816         id->gws_size = 0;
817         id->oa_base = 0;
818         id->oa_size = 0;
819 }
820
821 /**
822  * amdgpu_vm_reset_all_ids - reset VMIDs to zero
823  *
824  * @adev: amdgpu device structure
825  *
826  * Reset all VMIDs to force a flush on next use
827  */
828 void amdgpu_vm_reset_all_ids(struct amdgpu_device *adev)
829 {
830         unsigned i, j;
831
832         for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
833                 struct amdgpu_vm_id_manager *id_mgr =
834                         &adev->vm_manager.id_mgr[i];
835
836                 for (j = 1; j < id_mgr->num_ids; ++j)
837                         amdgpu_vm_reset_id(adev, i, j);
838         }
839 }
840
841 /**
842  * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
843  *
844  * @vm: requested vm
845  * @bo: requested buffer object
846  *
847  * Find @bo inside the requested vm.
848  * Search inside the @bo's vm list for the requested vm.
849  * Returns the found bo_va or NULL if none is found.
850  *
851  * Object has to be reserved!
852  */
853 struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
854                                        struct amdgpu_bo *bo)
855 {
856         struct amdgpu_bo_va *bo_va;
857
858         list_for_each_entry(bo_va, &bo->va, base.bo_list) {
859                 if (bo_va->base.vm == vm) {
860                         return bo_va;
861                 }
862         }
863         return NULL;
864 }
865
866 /**
867  * amdgpu_vm_do_set_ptes - helper to call the right asic function
868  *
869  * @params: see amdgpu_pte_update_params definition
870  * @pe: addr of the page entry
871  * @addr: dst addr to write into pe
872  * @count: number of page entries to update
873  * @incr: increase next addr by incr bytes
874  * @flags: hw access flags
875  *
876  * Traces the parameters and calls the right asic functions
877  * to setup the page table using the DMA.
878  */
879 static void amdgpu_vm_do_set_ptes(struct amdgpu_pte_update_params *params,
880                                   uint64_t pe, uint64_t addr,
881                                   unsigned count, uint32_t incr,
882                                   uint64_t flags)
883 {
884         trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags);
885
886         if (count < 3) {
887                 amdgpu_vm_write_pte(params->adev, params->ib, pe,
888                                     addr | flags, count, incr);
889
890         } else {
891                 amdgpu_vm_set_pte_pde(params->adev, params->ib, pe, addr,
892                                       count, incr, flags);
893         }
894 }
895
896 /**
897  * amdgpu_vm_do_copy_ptes - copy the PTEs from the GART
898  *
899  * @params: see amdgpu_pte_update_params definition
900  * @pe: addr of the page entry
901  * @addr: dst addr to write into pe
902  * @count: number of page entries to update
903  * @incr: increase next addr by incr bytes
904  * @flags: hw access flags
905  *
906  * Traces the parameters and calls the DMA function to copy the PTEs.
907  */
908 static void amdgpu_vm_do_copy_ptes(struct amdgpu_pte_update_params *params,
909                                    uint64_t pe, uint64_t addr,
910                                    unsigned count, uint32_t incr,
911                                    uint64_t flags)
912 {
913         uint64_t src = (params->src + (addr >> 12) * 8);
914
915
916         trace_amdgpu_vm_copy_ptes(pe, src, count);
917
918         amdgpu_vm_copy_pte(params->adev, params->ib, pe, src, count);
919 }
920
921 /**
922  * amdgpu_vm_map_gart - Resolve gart mapping of addr
923  *
924  * @pages_addr: optional DMA address to use for lookup
925  * @addr: the unmapped addr
926  *
927  * Look up the physical address of the page that the pte resolves
928  * to and return the pointer for the page table entry.
929  */
930 static uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
931 {
932         uint64_t result;
933
934         /* page table offset */
935         result = pages_addr[addr >> PAGE_SHIFT];
936
937         /* in case cpu page size != gpu page size */
938         result |= addr & (~PAGE_MASK);
939
940         result &= 0xFFFFFFFFFFFFF000ULL;
941
942         return result;
943 }
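/*
 * Example (illustrative, with 4 KiB CPU pages): for addr == 0x12345 the DMA
 * address stored in pages_addr[0x12] is returned with the low 12 bits cleared.
 * The OR with the in-page offset followed by the final mask only makes a
 * difference when the CPU page size is larger than the 4 KiB GPU page size,
 * in which case it selects the right 4 KiB chunk inside the larger CPU page.
 */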
944
945 /**
946  * amdgpu_vm_cpu_set_ptes - helper to update page tables via CPU
947  *
948  * @params: see amdgpu_pte_update_params definition
949  * @pe: kmap addr of the page entry
950  * @addr: dst addr to write into pe
951  * @count: number of page entries to update
952  * @incr: increase next addr by incr bytes
953  * @flags: hw access flags
954  *
955  * Write count number of PT/PD entries directly.
956  */
957 static void amdgpu_vm_cpu_set_ptes(struct amdgpu_pte_update_params *params,
958                                    uint64_t pe, uint64_t addr,
959                                    unsigned count, uint32_t incr,
960                                    uint64_t flags)
961 {
962         unsigned int i;
963         uint64_t value;
964
965         trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags);
966
967         for (i = 0; i < count; i++) {
968                 value = params->pages_addr ?
969                         amdgpu_vm_map_gart(params->pages_addr, addr) :
970                         addr;
971                 amdgpu_gart_set_pte_pde(params->adev, (void *)(uintptr_t)pe,
972                                         i, value, flags);
973                 addr += incr;
974         }
975 }
976
977 static int amdgpu_vm_wait_pd(struct amdgpu_device *adev, struct amdgpu_vm *vm,
978                              void *owner)
979 {
980         struct amdgpu_sync sync;
981         int r;
982
983         amdgpu_sync_create(&sync);
984         amdgpu_sync_resv(adev, &sync, vm->root.base.bo->tbo.resv, owner);
985         r = amdgpu_sync_wait(&sync, true);
986         amdgpu_sync_free(&sync);
987
988         return r;
989 }
990
991 /*
992  * amdgpu_vm_update_level - update a single level in the hierarchy
993  *
994  * @adev: amdgpu_device pointer
995  * @vm: requested vm
996  * @parent: parent directory
997  *
998  * Makes sure all entries in @parent are up to date.
999  * Returns 0 for success, error for failure.
1000  */
1001 static int amdgpu_vm_update_level(struct amdgpu_device *adev,
1002                                   struct amdgpu_vm *vm,
1003                                   struct amdgpu_vm_pt *parent,
1004                                   unsigned level)
1005 {
1006         struct amdgpu_bo *shadow;
1007         struct amdgpu_ring *ring = NULL;
1008         uint64_t pd_addr, shadow_addr = 0;
1009         uint32_t incr = amdgpu_vm_bo_size(adev, level + 1);
1010         uint64_t last_pde = ~0, last_pt = ~0, last_shadow = ~0;
1011         unsigned count = 0, pt_idx, ndw = 0;
1012         struct amdgpu_job *job;
1013         struct amdgpu_pte_update_params params;
1014         struct dma_fence *fence = NULL;
1015
1016         int r;
1017
1018         if (!parent->entries)
1019                 return 0;
1020
1021         memset(&params, 0, sizeof(params));
1022         params.adev = adev;
1023         shadow = parent->base.bo->shadow;
1024
1025         if (vm->use_cpu_for_update) {
1026                 pd_addr = (unsigned long)amdgpu_bo_kptr(parent->base.bo);
1027                 r = amdgpu_vm_wait_pd(adev, vm, AMDGPU_FENCE_OWNER_VM);
1028                 if (unlikely(r))
1029                         return r;
1030
1031                 params.func = amdgpu_vm_cpu_set_ptes;
1032         } else {
1033                 ring = container_of(vm->entity.sched, struct amdgpu_ring,
1034                                     sched);
1035
1036                 /* padding, etc. */
1037                 ndw = 64;
1038
1039                 /* assume the worst case */
1040                 ndw += parent->last_entry_used * 6;
1041
1042                 pd_addr = amdgpu_bo_gpu_offset(parent->base.bo);
1043
1044                 if (shadow) {
1045                         shadow_addr = amdgpu_bo_gpu_offset(shadow);
1046                         ndw *= 2;
1047                 } else {
1048                         shadow_addr = 0;
1049                 }
1050
1051                 r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
1052                 if (r)
1053                         return r;
1054
1055                 params.ib = &job->ibs[0];
1056                 params.func = amdgpu_vm_do_set_ptes;
1057         }
1058
1059
1060         /* walk over the address space and update the directory */
1061         for (pt_idx = 0; pt_idx <= parent->last_entry_used; ++pt_idx) {
1062                 struct amdgpu_bo *bo = parent->entries[pt_idx].base.bo;
1063                 uint64_t pde, pt;
1064
1065                 if (bo == NULL)
1066                         continue;
1067
1068                 pt = amdgpu_bo_gpu_offset(bo);
1069                 pt = amdgpu_gart_get_vm_pde(adev, pt);
1070                 /* Don't update huge pages here */
1071                 if ((parent->entries[pt_idx].addr & AMDGPU_PDE_PTE) ||
1072                     parent->entries[pt_idx].addr == (pt | AMDGPU_PTE_VALID))
1073                         continue;
1074
1075                 parent->entries[pt_idx].addr = pt | AMDGPU_PTE_VALID;
1076
1077                 pde = pd_addr + pt_idx * 8;
1078                 if (((last_pde + 8 * count) != pde) ||
1079                     ((last_pt + incr * count) != pt) ||
1080                     (count == AMDGPU_VM_MAX_UPDATE_SIZE)) {
1081
1082                         if (count) {
1083                                 if (shadow)
1084                                         params.func(&params,
1085                                                     last_shadow,
1086                                                     last_pt, count,
1087                                                     incr,
1088                                                     AMDGPU_PTE_VALID);
1089
1090                                 params.func(&params, last_pde,
1091                                             last_pt, count, incr,
1092                                             AMDGPU_PTE_VALID);
1093                         }
1094
1095                         count = 1;
1096                         last_pde = pde;
1097                         last_shadow = shadow_addr + pt_idx * 8;
1098                         last_pt = pt;
1099                 } else {
1100                         ++count;
1101                 }
1102         }
1103
1104         if (count) {
1105                 if (vm->root.base.bo->shadow)
1106                         params.func(&params, last_shadow, last_pt,
1107                                     count, incr, AMDGPU_PTE_VALID);
1108
1109                 params.func(&params, last_pde, last_pt,
1110                             count, incr, AMDGPU_PTE_VALID);
1111         }
1112
1113         if (!vm->use_cpu_for_update) {
1114                 if (params.ib->length_dw == 0) {
1115                         amdgpu_job_free(job);
1116                 } else {
1117                         amdgpu_ring_pad_ib(ring, params.ib);
1118                         amdgpu_sync_resv(adev, &job->sync,
1119                                          parent->base.bo->tbo.resv,
1120                                          AMDGPU_FENCE_OWNER_VM);
1121                         if (shadow)
1122                                 amdgpu_sync_resv(adev, &job->sync,
1123                                                  shadow->tbo.resv,
1124                                                  AMDGPU_FENCE_OWNER_VM);
1125
1126                         WARN_ON(params.ib->length_dw > ndw);
1127                         r = amdgpu_job_submit(job, ring, &vm->entity,
1128                                         AMDGPU_FENCE_OWNER_VM, &fence);
1129                         if (r)
1130                                 goto error_free;
1131
1132                         amdgpu_bo_fence(parent->base.bo, fence, true);
1133                         dma_fence_put(vm->last_dir_update);
1134                         vm->last_dir_update = dma_fence_get(fence);
1135                         dma_fence_put(fence);
1136                 }
1137         }
1138         /*
1139          * Recurse into the subdirectories. This recursion is harmless because
1140          * we only have a maximum of 5 layers.
1141          */
1142         for (pt_idx = 0; pt_idx <= parent->last_entry_used; ++pt_idx) {
1143                 struct amdgpu_vm_pt *entry = &parent->entries[pt_idx];
1144
1145                 if (!entry->base.bo)
1146                         continue;
1147
1148                 r = amdgpu_vm_update_level(adev, vm, entry, level + 1);
1149                 if (r)
1150                         return r;
1151         }
1152
1153         return 0;
1154
1155 error_free:
1156         amdgpu_job_free(job);
1157         return r;
1158 }
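/*
 * Note on the loop above: PDE writes are batched.  As long as consecutive
 * PDEs (last_pde + 8 * count) point at page tables that are laid out
 * contiguously in memory (last_pt + incr * count) the run is only extended,
 * and a single params.func() call of up to AMDGPU_VM_MAX_UPDATE_SIZE entries
 * is emitted when the pattern breaks.  Three adjacent PTs allocated back to
 * back therefore result in one update of count 3 instead of three
 * single-entry updates.
 */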
1159
1160 /*
1161  * amdgpu_vm_invalidate_level - mark all PD levels as invalid
1162  *
1163  * @parent: parent PD
1164  *
1165  * Mark all PD levels as invalid after an error.
1166  */
1167 static void amdgpu_vm_invalidate_level(struct amdgpu_vm_pt *parent)
1168 {
1169         unsigned pt_idx;
1170
1171         /*
1172          * Recurse into the subdirectories. This recursion is harmless because
1173          * we only have a maximum of 5 layers.
1174          */
1175         for (pt_idx = 0; pt_idx <= parent->last_entry_used; ++pt_idx) {
1176                 struct amdgpu_vm_pt *entry = &parent->entries[pt_idx];
1177
1178                 if (!entry->base.bo)
1179                         continue;
1180
1181                 entry->addr = ~0ULL;
1182                 amdgpu_vm_invalidate_level(entry);
1183         }
1184 }
1185
1186 /*
1187  * amdgpu_vm_update_directories - make sure that all directories are valid
1188  *
1189  * @adev: amdgpu_device pointer
1190  * @vm: requested vm
1191  *
1192  * Makes sure all directories are up to date.
1193  * Returns 0 for success, error for failure.
1194  */
1195 int amdgpu_vm_update_directories(struct amdgpu_device *adev,
1196                                  struct amdgpu_vm *vm)
1197 {
1198         int r;
1199
1200         r = amdgpu_vm_update_level(adev, vm, &vm->root, 0);
1201         if (r)
1202                 amdgpu_vm_invalidate_level(&vm->root);
1203
1204         if (vm->use_cpu_for_update) {
1205                 /* Flush HDP */
1206                 mb();
1207                 amdgpu_gart_flush_gpu_tlb(adev, 0);
1208         }
1209
1210         return r;
1211 }
1212
1213 /**
1214  * amdgpu_vm_get_entry - find the entry for an address
1215  *
1216  * @p: see amdgpu_pte_update_params definition
1217  * @addr: virtual address in question
1218  * @entry: resulting entry or NULL
1219  * @parent: parent entry
1220  *
1221  * Find the vm_pt entry and its parent for the given address.
1222  */
1223 void amdgpu_vm_get_entry(struct amdgpu_pte_update_params *p, uint64_t addr,
1224                          struct amdgpu_vm_pt **entry,
1225                          struct amdgpu_vm_pt **parent)
1226 {
1227         unsigned idx, level = p->adev->vm_manager.num_level;
1228
1229         *parent = NULL;
1230         *entry = &p->vm->root;
1231         while ((*entry)->entries) {
1232                 idx = addr >> (p->adev->vm_manager.block_size * level--);
1233                 idx %= amdgpu_bo_size((*entry)->base.bo) / 8;
1234                 *parent = *entry;
1235                 *entry = &(*entry)->entries[idx];
1236         }
1237
1238         if (level)
1239                 *entry = NULL;
1240 }
1241
1242 /**
1243  * amdgpu_vm_handle_huge_pages - handle updating the PD with huge pages
1244  *
1245  * @p: see amdgpu_pte_update_params definition
1246  * @entry: vm_pt entry to check
1247  * @parent: parent entry
1248  * @nptes: number of PTEs updated with this operation
1249  * @dst: destination address where the PTEs should point to
1250  * @flags: access flags for the PTEs
1251  *
1252  * Check if we can update the PD with a huge page.
1253  */
1254 static void amdgpu_vm_handle_huge_pages(struct amdgpu_pte_update_params *p,
1255                                         struct amdgpu_vm_pt *entry,
1256                                         struct amdgpu_vm_pt *parent,
1257                                         unsigned nptes, uint64_t dst,
1258                                         uint64_t flags)
1259 {
1260         bool use_cpu_update = (p->func == amdgpu_vm_cpu_set_ptes);
1261         uint64_t pd_addr, pde;
1262
1263         /* In the case of a mixed PT the PDE must point to it */
1264         if (p->adev->asic_type < CHIP_VEGA10 ||
1265             nptes != AMDGPU_VM_PTE_COUNT(p->adev) ||
1266             p->src ||
1267             !(flags & AMDGPU_PTE_VALID)) {
1268
1269                 dst = amdgpu_bo_gpu_offset(entry->base.bo);
1270                 dst = amdgpu_gart_get_vm_pde(p->adev, dst);
1271                 flags = AMDGPU_PTE_VALID;
1272         } else {
1273                 /* Set the huge page flag to stop scanning at this PDE */
1274                 flags |= AMDGPU_PDE_PTE;
1275         }
1276
1277         if (entry->addr == (dst | flags))
1278                 return;
1279
1280         entry->addr = (dst | flags);
1281
1282         if (use_cpu_update) {
1283                 /* In case a huge page is replaced with a system
1284                  * memory mapping, p->pages_addr != NULL and
1285                  * amdgpu_vm_cpu_set_ptes would try to translate dst
1286                  * through amdgpu_vm_map_gart. But dst is already a
1287                  * GPU address (of the page table). Disable
1288                  * amdgpu_vm_map_gart temporarily.
1289                  */
1290                 dma_addr_t *tmp;
1291
1292                 tmp = p->pages_addr;
1293                 p->pages_addr = NULL;
1294
1295                 pd_addr = (unsigned long)amdgpu_bo_kptr(parent->base.bo);
1296                 pde = pd_addr + (entry - parent->entries) * 8;
1297                 amdgpu_vm_cpu_set_ptes(p, pde, dst, 1, 0, flags);
1298
1299                 p->pages_addr = tmp;
1300         } else {
1301                 if (parent->base.bo->shadow) {
1302                         pd_addr = amdgpu_bo_gpu_offset(parent->base.bo->shadow);
1303                         pde = pd_addr + (entry - parent->entries) * 8;
1304                         amdgpu_vm_do_set_ptes(p, pde, dst, 1, 0, flags);
1305                 }
1306                 pd_addr = amdgpu_bo_gpu_offset(parent->base.bo);
1307                 pde = pd_addr + (entry - parent->entries) * 8;
1308                 amdgpu_vm_do_set_ptes(p, pde, dst, 1, 0, flags);
1309         }
1310 }
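/*
 * Summary: a PDE is turned into a huge-page PTE (AMDGPU_PDE_PTE) only on
 * Vega10 and later, and only when the update covers a full page table
 * (nptes == AMDGPU_VM_PTE_COUNT), is a valid mapping and is not a copy from
 * the GART (p->src).  In every other case the PDE is (re)pointed at the page
 * table itself.
 */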
1311
1312 /**
1313  * amdgpu_vm_update_ptes - make sure that page tables are valid
1314  *
1315  * @params: see amdgpu_pte_update_params definition
1316  * @vm: requested vm
1317  * @start: start of GPU address range
1318  * @end: end of GPU address range
1319  * @dst: destination address to map to, the next dst inside the function
1320  * @flags: mapping flags
1321  *
1322  * Update the page tables in the range @start - @end.
1323  * Returns 0 for success, -EINVAL for failure.
1324  */
1325 static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
1326                                   uint64_t start, uint64_t end,
1327                                   uint64_t dst, uint64_t flags)
1328 {
1329         struct amdgpu_device *adev = params->adev;
1330         const uint64_t mask = AMDGPU_VM_PTE_COUNT(adev) - 1;
1331
1332         uint64_t addr, pe_start;
1333         struct amdgpu_bo *pt;
1334         unsigned nptes;
1335         bool use_cpu_update = (params->func == amdgpu_vm_cpu_set_ptes);
1336
1337         /* walk over the address space and update the page tables */
1338         for (addr = start; addr < end; addr += nptes,
1339              dst += nptes * AMDGPU_GPU_PAGE_SIZE) {
1340                 struct amdgpu_vm_pt *entry, *parent;
1341
1342                 amdgpu_vm_get_entry(params, addr, &entry, &parent);
1343                 if (!entry)
1344                         return -ENOENT;
1345
1346                 if ((addr & ~mask) == (end & ~mask))
1347                         nptes = end - addr;
1348                 else
1349                         nptes = AMDGPU_VM_PTE_COUNT(adev) - (addr & mask);
1350
1351                 amdgpu_vm_handle_huge_pages(params, entry, parent,
1352                                             nptes, dst, flags);
1353                 /* We don't need to update PTEs for huge pages */
1354                 if (entry->addr & AMDGPU_PDE_PTE)
1355                         continue;
1356
1357                 pt = entry->base.bo;
1358                 if (use_cpu_update) {
1359                         pe_start = (unsigned long)amdgpu_bo_kptr(pt);
1360                 } else {
1361                         if (pt->shadow) {
1362                                 pe_start = amdgpu_bo_gpu_offset(pt->shadow);
1363                                 pe_start += (addr & mask) * 8;
1364                                 params->func(params, pe_start, dst, nptes,
1365                                              AMDGPU_GPU_PAGE_SIZE, flags);
1366                         }
1367                         pe_start = amdgpu_bo_gpu_offset(pt);
1368                 }
1369
1370                 pe_start += (addr & mask) * 8;
1371                 params->func(params, pe_start, dst, nptes,
1372                              AMDGPU_GPU_PAGE_SIZE, flags);
1373         }
1374
1375         return 0;
1376 }
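/*
 * Example of how the walk above chops a range at page table boundaries
 * (assuming 512 PTEs per page table, i.e. mask == 511): updating the 530
 * pages 500..1029 (start == 500, end == 1030) is done in three steps with
 * nptes == 12, 512 and 6, one per page table touched.
 */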
1377
1378 /*
1379  * amdgpu_vm_frag_ptes - add fragment information to PTEs
1380  *
1381  * @params: see amdgpu_pte_update_params definition
1382  * @vm: requested vm
1383  * @start: first PTE to handle
1384  * @end: last PTE to handle
1385  * @dst: addr those PTEs should point to
1386  * @flags: hw mapping flags
1387  * Returns 0 for success, -EINVAL for failure.
1388  */
1389 static int amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params  *params,
1390                                 uint64_t start, uint64_t end,
1391                                 uint64_t dst, uint64_t flags)
1392 {
1393         int r;
1394
1395         /**
1396          * The MC L1 TLB supports variable sized pages, based on a fragment
1397          * field in the PTE. When this field is set to a non-zero value, page
1398          * granularity is increased from 4KB to (1 << (12 + frag)). The PTE
1399          * flags are considered valid for all PTEs within the fragment range
1400          * and corresponding mappings are assumed to be physically contiguous.
1401          *
1402          * The L1 TLB can store a single PTE for the whole fragment,
1403          * significantly increasing the space available for translation
1404          * caching. This leads to large improvements in throughput when the
1405          * TLB is under pressure.
1406          *
1407          * The L2 TLB distributes small and large fragments into two
1408          * asymmetric partitions. The large fragment cache is significantly
1409          * larger. Thus, we try to use large fragments wherever possible.
1410          * Userspace can support this by aligning virtual base address and
1411          * allocation size to the fragment size.
1412          */
1413         unsigned pages_per_frag = params->adev->vm_manager.fragment_size;
1414         uint64_t frag_flags = AMDGPU_PTE_FRAG(pages_per_frag);
1415         uint64_t frag_align = 1 << pages_per_frag;
1416
1417         uint64_t frag_start = ALIGN(start, frag_align);
1418         uint64_t frag_end = end & ~(frag_align - 1);
1419
1420         /* system pages are not contiguous */
1421         if (params->src || !(flags & AMDGPU_PTE_VALID) ||
1422             (frag_start >= frag_end))
1423                 return amdgpu_vm_update_ptes(params, start, end, dst, flags);
1424
1425         /* handle the 4K area at the beginning */
1426         if (start != frag_start) {
1427                 r = amdgpu_vm_update_ptes(params, start, frag_start,
1428                                           dst, flags);
1429                 if (r)
1430                         return r;
1431                 dst += (frag_start - start) * AMDGPU_GPU_PAGE_SIZE;
1432         }
1433
1434         /* handle the area in the middle */
1435         r = amdgpu_vm_update_ptes(params, frag_start, frag_end, dst,
1436                                   flags | frag_flags);
1437         if (r)
1438                 return r;
1439
1440         /* handle the 4K area at the end */
1441         if (frag_end != end) {
1442                 dst += (frag_end - frag_start) * AMDGPU_GPU_PAGE_SIZE;
1443                 r = amdgpu_vm_update_ptes(params, frag_end, end, dst, flags);
1444         }
1445         return r;
1446 }
1447
1448 /**
1449  * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
1450  *
1451  * @adev: amdgpu_device pointer
1452  * @exclusive: fence we need to sync to
1453  * @src: address where to copy page table entries from
1454  * @pages_addr: DMA addresses to use for mapping
1455  * @vm: requested vm
1456  * @start: start of mapped range
1457  * @last: last mapped entry
1458  * @flags: flags for the entries
1459  * @addr: addr to set the area to
1460  * @fence: optional resulting fence
1461  *
1462  * Fill in the page table entries between @start and @last.
1463  * Returns 0 for success, -EINVAL for failure.
1464  */
1465 static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
1466                                        struct dma_fence *exclusive,
1467                                        uint64_t src,
1468                                        dma_addr_t *pages_addr,
1469                                        struct amdgpu_vm *vm,
1470                                        uint64_t start, uint64_t last,
1471                                        uint64_t flags, uint64_t addr,
1472                                        struct dma_fence **fence)
1473 {
1474         struct amdgpu_ring *ring;
1475         void *owner = AMDGPU_FENCE_OWNER_VM;
1476         unsigned nptes, ncmds, ndw;
1477         struct amdgpu_job *job;
1478         struct amdgpu_pte_update_params params;
1479         struct dma_fence *f = NULL;
1480         int r;
1481
1482         memset(&params, 0, sizeof(params));
1483         params.adev = adev;
1484         params.vm = vm;
1485         params.src = src;
1486
1487         /* sync to everything on unmapping */
1488         if (!(flags & AMDGPU_PTE_VALID))
1489                 owner = AMDGPU_FENCE_OWNER_UNDEFINED;
1490
1491         if (vm->use_cpu_for_update) {
1492                 /* params.src is used as a flag to indicate system memory */
1493                 if (pages_addr)
1494                         params.src = ~0;
1495
1496                 /* Wait for the PT BOs to be idle. PTs share the same
1497                  * reservation object as the root PD BO.
1498                  */
1499                 r = amdgpu_vm_wait_pd(adev, vm, owner);
1500                 if (unlikely(r))
1501                         return r;
1502
1503                 params.func = amdgpu_vm_cpu_set_ptes;
1504                 params.pages_addr = pages_addr;
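                     /*
                      * Editorial note: the CPU update path writes the PTEs
                      * directly through the kernel mapping of the page table
                      * BOs via amdgpu_vm_cpu_set_ptes, so no IB or scheduler
                      * job is needed and we return right after the update.
                      */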
1505                 return amdgpu_vm_frag_ptes(&params, start, last + 1,
1506                                            addr, flags);
1507         }
1508
1509         ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
1510
1511         nptes = last - start + 1;
1512
1513         /*
1514          * reserve space for one command every (1 << BLOCK_SIZE) entries,
1515          * or every 2048 entries, whichever is smaller
1516          */
1517         ncmds = (nptes >> min(adev->vm_manager.block_size, 11u)) + 1;
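             /*
              * Editorial example (hypothetical numbers): with a block_size of
              * 9 and nptes = 4096 this reserves (4096 >> 9) + 1 = 9 commands,
              * one per page-table block touched plus one for the remainder.
              */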
1518
1519         /* padding, etc. */
1520         ndw = 64;
1521
1522         /* one PDE write for each huge page */
1523         ndw += ((nptes >> adev->vm_manager.block_size) + 1) * 6;
1524
1525         if (src) {
1526                 /* only copy commands needed */
1527                 ndw += ncmds * 7;
1528
1529                 params.func = amdgpu_vm_do_copy_ptes;
1530
1531         } else if (pages_addr) {
1532                 /* copy commands needed */
1533                 ndw += ncmds * 7;
1534
1535                 /* and also PTEs */
1536                 ndw += nptes * 2;
1537
1538                 params.func = amdgpu_vm_do_copy_ptes;
1539
1540         } else {
1541                 /* set page commands needed */
1542                 ndw += ncmds * 10;
1543
1544                 /* two extra commands for begin/end of fragment */
1545                 ndw += 2 * 10;
1546
1547                 params.func = amdgpu_vm_do_set_ptes;
1548         }
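         /*
          * Editorial summary: three worst-case budgets are computed above --
          * copying PTEs from an existing src buffer, copying PTEs that will be
          * placed in the tail of this IB (GART-backed system memory), or
          * emitting SET_PTE commands directly for linearly mapped VRAM.  ndw
          * is the dword budget for whichever path was chosen.
          */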
1549
1550         r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
1551         if (r)
1552                 return r;
1553
1554         params.ib = &job->ibs[0];
1555
1556         if (!src && pages_addr) {
1557                 uint64_t *pte;
1558                 unsigned i;
1559
1560                 /* Put the PTEs at the end of the IB. */
1561                 i = ndw - nptes * 2;
1562                 pte = (uint64_t *)&(job->ibs->ptr[i]);
1563                 params.src = job->ibs->gpu_addr + i * 4;
1564
1565                 for (i = 0; i < nptes; ++i) {
1566                         pte[i] = amdgpu_vm_map_gart(pages_addr, addr + i *
1567                                                     AMDGPU_GPU_PAGE_SIZE);
1568                         pte[i] |= flags;
1569                 }
1570                 addr = 0;
1571         }
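         /*
          * Editorial note: for the GART-backed path above, the resolved PTE
          * values occupy the last nptes * 2 dwords of the IB and params.src
          * points at their GPU address, so the copy commands emitted later
          * read the PTEs straight out of the IB itself; addr is cleared
          * because the per-page addresses are already encoded in those PTEs.
          */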
1572
1573         r = amdgpu_sync_fence(adev, &job->sync, exclusive);
1574         if (r)
1575                 goto error_free;
1576
1577         r = amdgpu_sync_resv(adev, &job->sync, vm->root.base.bo->tbo.resv,
1578                              owner);
1579         if (r)
1580                 goto error_free;
1581
1582         r = reservation_object_reserve_shared(vm->root.base.bo->tbo.resv);
1583         if (r)
1584                 goto error_free;
1585
1586         r = amdgpu_vm_frag_ptes(&params, start, last + 1, addr, flags);
1587         if (r)
1588                 goto error_free;
1589
1590         amdgpu_ring_pad_ib(ring, params.ib);
1591         WARN_ON(params.ib->length_dw > ndw);
1592         r = amdgpu_job_submit(job, ring, &vm->entity,
1593                               AMDGPU_FENCE_OWNER_VM, &f);
1594         if (r)
1595                 goto error_free;
1596
1597         amdgpu_bo_fence(vm->root.base.bo, f, true);
1598         dma_fence_put(*fence);
1599         *fence = f;
1600         return 0;
1601
1602 error_free:
1603         amdgpu_job_free(job);
1604         amdgpu_vm_invalidate_level(&vm->root);
1605         return r;
1606 }
1607
1608 /**
1609  * amdgpu_vm_bo_split_mapping - split a mapping into smaller chunks
1610  *
1611  * @adev: amdgpu_device pointer
1612  * @exclusive: fence we need to sync to
1613  * @pages_addr: DMA addresses to use for mapping
1614  * @vm: requested vm
1615  * @mapping: mapped range and flags to use for the update
1616  * @flags: HW flags for the mapping
1617  * @nodes: array of drm_mm_nodes with the MC addresses
1618  * @fence: optional resulting fence
1619  *
1620  * Split the mapping into smaller chunks so that each update fits
1621  * into a SDMA IB.
1622  * Returns 0 for success, -EINVAL for failure.
1623  */
1624 static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
1625                                       struct dma_fence *exclusive,
1626                                       dma_addr_t *pages_addr,
1627                                       struct amdgpu_vm *vm,
1628                                       struct amdgpu_bo_va_mapping *mapping,
1629                                       uint64_t flags,
1630                                       struct drm_mm_node *nodes,
1631                                       struct dma_fence **fence)
1632 {
1633         uint64_t pfn, src = 0, start = mapping->start;
1634         int r;
1635
1636         /* Normally bo_va->flags only contains the READABLE and WRITEABLE bits,
1637          * but filter the flags here first just in case.
1638          */
1639         if (!(mapping->flags & AMDGPU_PTE_READABLE))
1640                 flags &= ~AMDGPU_PTE_READABLE;
1641         if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
1642                 flags &= ~AMDGPU_PTE_WRITEABLE;
1643
1644         flags &= ~AMDGPU_PTE_EXECUTABLE;
1645         flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
1646
1647         flags &= ~AMDGPU_PTE_MTYPE_MASK;
1648         flags |= (mapping->flags & AMDGPU_PTE_MTYPE_MASK);
1649
1650         if ((mapping->flags & AMDGPU_PTE_PRT) &&
1651             (adev->asic_type >= CHIP_VEGA10)) {
1652                 flags |= AMDGPU_PTE_PRT;
1653                 flags &= ~AMDGPU_PTE_VALID;
1654         }
1655
1656         trace_amdgpu_vm_bo_update(mapping);
1657
1658         pfn = mapping->offset >> PAGE_SHIFT;
1659         if (nodes) {
1660                 while (pfn >= nodes->size) {
1661                         pfn -= nodes->size;
1662                         ++nodes;
1663                 }
1664         }
1665
1666         do {
1667                 uint64_t max_entries;
1668                 uint64_t addr, last;
1669
1670                 if (nodes) {
1671                         addr = nodes->start << PAGE_SHIFT;
1672                         max_entries = (nodes->size - pfn) *
1673                                 (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
1674                 } else {
1675                         addr = 0;
1676                         max_entries = S64_MAX;
1677                 }
1678
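                     /*
                      * Editorial note: GART-backed (system memory) chunks are
                      * clamped to 16K PTEs (64MB of address space) below so
                      * that the PTE payload copied through each IB stays
                      * small; VRAM chunks are bounded only by the drm_mm node
                      * size.
                      */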
1679                 if (pages_addr) {
1680                         max_entries = min(max_entries, 16ull * 1024ull);
1681                         addr = 0;
1682                 } else if (flags & AMDGPU_PTE_VALID) {
1683                         addr += adev->vm_manager.vram_base_offset;
1684                 }
1685                 addr += pfn << PAGE_SHIFT;
1686
1687                 last = min((uint64_t)mapping->last, start + max_entries - 1);
1688                 r = amdgpu_vm_bo_update_mapping(adev, exclusive,
1689                                                 src, pages_addr, vm,
1690                                                 start, last, flags, addr,
1691                                                 fence);
1692                 if (r)
1693                         return r;
1694
1695                 pfn += last - start + 1;
1696                 if (nodes && nodes->size == pfn) {
1697                         pfn = 0;
1698                         ++nodes;
1699                 }
1700                 start = last + 1;
1701
1702         } while (unlikely(start != mapping->last + 1));
1703
1704         return 0;
1705 }
1706
1707 /**
1708  * amdgpu_vm_bo_update - update all BO mappings in the vm page table
1709  *
1710  * @adev: amdgpu_device pointer
1711  * @bo_va: requested BO and VM object
1712  * @clear: if true clear the entries
1713  *
1714  * Fill in the page table entries for @bo_va.
1715  * Returns 0 for success, -EINVAL for failure.
1716  */
1717 int amdgpu_vm_bo_update(struct amdgpu_device *adev,
1718                         struct amdgpu_bo_va *bo_va,
1719                         bool clear)
1720 {
1721         struct amdgpu_bo *bo = bo_va->base.bo;
1722         struct amdgpu_vm *vm = bo_va->base.vm;
1723         struct amdgpu_bo_va_mapping *mapping;
1724         dma_addr_t *pages_addr = NULL;
1725         struct ttm_mem_reg *mem;
1726         struct drm_mm_node *nodes;
1727         struct dma_fence *exclusive;
1728         uint64_t flags;
1729         int r;
1730
1731         if (clear || !bo_va->base.bo) {
1732                 mem = NULL;
1733                 nodes = NULL;
1734                 exclusive = NULL;
1735         } else {
1736                 struct ttm_dma_tt *ttm;
1737
1738                 mem = &bo_va->base.bo->tbo.mem;
1739                 nodes = mem->mm_node;
1740                 if (mem->mem_type == TTM_PL_TT) {
1741                         ttm = container_of(bo_va->base.bo->tbo.ttm,
1742                                            struct ttm_dma_tt, ttm);
1743                         pages_addr = ttm->dma_address;
1744                 }
1745                 exclusive = reservation_object_get_excl(bo->tbo.resv);
1746         }
1747
1748         if (bo)
1749                 flags = amdgpu_ttm_tt_pte_flags(adev, bo->tbo.ttm, mem);
1750         else
1751                 flags = 0x0;
1752
1753         if (!clear && bo_va->base.moved) {
1754                 bo_va->base.moved = false;
1755                 list_splice_init(&bo_va->valids, &bo_va->invalids);
1756
1757         } else if (bo_va->cleared != clear) {
1758                 list_splice_init(&bo_va->valids, &bo_va->invalids);
1759         }
1760
1761         list_for_each_entry(mapping, &bo_va->invalids, list) {
1762                 r = amdgpu_vm_bo_split_mapping(adev, exclusive, pages_addr, vm,
1763                                                mapping, flags, nodes,
1764                                                &bo_va->last_pt_update);
1765                 if (r)
1766                         return r;
1767         }
1768
1769         if (vm->use_cpu_for_update) {
1770                 /* Flush HDP */
1771                 mb();
1772                 amdgpu_gart_flush_gpu_tlb(adev, 0);
1773         }
1774
1775         spin_lock(&vm->status_lock);
1776         list_del_init(&bo_va->base.vm_status);
1777         spin_unlock(&vm->status_lock);
1778
1779         list_splice_init(&bo_va->invalids, &bo_va->valids);
1780         bo_va->cleared = clear;
1781
1782         if (trace_amdgpu_vm_bo_mapping_enabled()) {
1783                 list_for_each_entry(mapping, &bo_va->valids, list)
1784                         trace_amdgpu_vm_bo_mapping(mapping);
1785         }
1786
1787         return 0;
1788 }
1789
1790 /**
1791  * amdgpu_vm_update_prt_state - update the global PRT state
1792  */
1793 static void amdgpu_vm_update_prt_state(struct amdgpu_device *adev)
1794 {
1795         unsigned long flags;
1796         bool enable;
1797
1798         spin_lock_irqsave(&adev->vm_manager.prt_lock, flags);
1799         enable = !!atomic_read(&adev->vm_manager.num_prt_users);
1800         adev->gart.gart_funcs->set_prt(adev, enable);
1801         spin_unlock_irqrestore(&adev->vm_manager.prt_lock, flags);
1802 }
1803
1804 /**
1805  * amdgpu_vm_prt_get - add a PRT user
1806  */
1807 static void amdgpu_vm_prt_get(struct amdgpu_device *adev)
1808 {
1809         if (!adev->gart.gart_funcs->set_prt)
1810                 return;
1811
1812         if (atomic_inc_return(&adev->vm_manager.num_prt_users) == 1)
1813                 amdgpu_vm_update_prt_state(adev);
1814 }
1815
1816 /**
1817  * amdgpu_vm_prt_put - drop a PRT user
1818  */
1819 static void amdgpu_vm_prt_put(struct amdgpu_device *adev)
1820 {
1821         if (atomic_dec_return(&adev->vm_manager.num_prt_users) == 0)
1822                 amdgpu_vm_update_prt_state(adev);
1823 }
1824
1825 /**
1826  * amdgpu_vm_prt_cb - callback for updating the PRT status
1827  */
1828 static void amdgpu_vm_prt_cb(struct dma_fence *fence, struct dma_fence_cb *_cb)
1829 {
1830         struct amdgpu_prt_cb *cb = container_of(_cb, struct amdgpu_prt_cb, cb);
1831
1832         amdgpu_vm_prt_put(cb->adev);
1833         kfree(cb);
1834 }
1835
1836 /**
1837  * amdgpu_vm_add_prt_cb - add callback for updating the PRT status
1838  */
1839 static void amdgpu_vm_add_prt_cb(struct amdgpu_device *adev,
1840                                  struct dma_fence *fence)
1841 {
1842         struct amdgpu_prt_cb *cb;
1843
1844         if (!adev->gart.gart_funcs->set_prt)
1845                 return;
1846
1847         cb = kmalloc(sizeof(struct amdgpu_prt_cb), GFP_KERNEL);
1848         if (!cb) {
1849                 /* Last resort when we are OOM */
1850                 if (fence)
1851                         dma_fence_wait(fence, false);
1852
1853                 amdgpu_vm_prt_put(adev);
1854         } else {
1855                 cb->adev = adev;
1856                 if (!fence || dma_fence_add_callback(fence, &cb->cb,
1857                                                      amdgpu_vm_prt_cb))
1858                         amdgpu_vm_prt_cb(fence, &cb->cb);
1859         }
1860 }
1861
1862 /**
1863  * amdgpu_vm_free_mapping - free a mapping
1864  *
1865  * @adev: amdgpu_device pointer
1866  * @vm: requested vm
1867  * @mapping: mapping to be freed
1868  * @fence: fence of the unmap operation
1869  *
1870  * Free a mapping and make sure we decrease the PRT usage count if applicable.
1871  */
1872 static void amdgpu_vm_free_mapping(struct amdgpu_device *adev,
1873                                    struct amdgpu_vm *vm,
1874                                    struct amdgpu_bo_va_mapping *mapping,
1875                                    struct dma_fence *fence)
1876 {
1877         if (mapping->flags & AMDGPU_PTE_PRT)
1878                 amdgpu_vm_add_prt_cb(adev, fence);
1879         kfree(mapping);
1880 }
1881
1882 /**
1883  * amdgpu_vm_prt_fini - finish all prt mappings
1884  *
1885  * @adev: amdgpu_device pointer
1886  * @vm: requested vm
1887  *
1888  * Register a cleanup callback to disable PRT support after VM dies.
1889  */
1890 static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
1891 {
1892         struct reservation_object *resv = vm->root.base.bo->tbo.resv;
1893         struct dma_fence *excl, **shared;
1894         unsigned i, shared_count;
1895         int r;
1896
1897         r = reservation_object_get_fences_rcu(resv, &excl,
1898                                               &shared_count, &shared);
1899         if (r) {
1900                 /* Not enough memory to grab the fence list, as last resort
1901                  * block for all the fences to complete.
1902                  */
1903                 reservation_object_wait_timeout_rcu(resv, true, false,
1904                                                     MAX_SCHEDULE_TIMEOUT);
1905                 return;
1906         }
1907
1908         /* Add a callback for each fence in the reservation object */
1909         amdgpu_vm_prt_get(adev);
1910         amdgpu_vm_add_prt_cb(adev, excl);
1911
1912         for (i = 0; i < shared_count; ++i) {
1913                 amdgpu_vm_prt_get(adev);
1914                 amdgpu_vm_add_prt_cb(adev, shared[i]);
1915         }
1916
1917         kfree(shared);
1918 }
1919
1920 /**
1921  * amdgpu_vm_clear_freed - clear freed BOs in the PT
1922  *
1923  * @adev: amdgpu_device pointer
1924  * @vm: requested vm
1925  * @fence: optional resulting fence (unchanged if no work needed to be done
1926  * or if an error occurred)
1927  *
1928  * Make sure all freed BOs are cleared in the PT.
1929  * Returns 0 for success.
1930  *
1931  * PTs have to be reserved and mutex must be locked!
1932  */
1933 int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
1934                           struct amdgpu_vm *vm,
1935                           struct dma_fence **fence)
1936 {
1937         struct amdgpu_bo_va_mapping *mapping;
1938         struct dma_fence *f = NULL;
1939         int r;
1940         uint64_t init_pte_value = 0;
1941
1942         while (!list_empty(&vm->freed)) {
1943                 mapping = list_first_entry(&vm->freed,
1944                         struct amdgpu_bo_va_mapping, list);
1945                 list_del(&mapping->list);
1946
1947                 if (vm->pte_support_ats)
1948                         init_pte_value = AMDGPU_PTE_SYSTEM;
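                     /*
                      * Editorial note: when ATS is supported, freed ranges are
                      * refilled with AMDGPU_PTE_SYSTEM instead of 0,
                      * presumably so that accesses are routed through the
                      * ATC/IOMMU path rather than faulting.
                      */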
1949
1950                 r = amdgpu_vm_bo_update_mapping(adev, NULL, 0, NULL, vm,
1951                                                 mapping->start, mapping->last,
1952                                                 init_pte_value, 0, &f);
1953                 amdgpu_vm_free_mapping(adev, vm, mapping, f);
1954                 if (r) {
1955                         dma_fence_put(f);
1956                         return r;
1957                 }
1958         }
1959
1960         if (fence && f) {
1961                 dma_fence_put(*fence);
1962                 *fence = f;
1963         } else {
1964                 dma_fence_put(f);
1965         }
1966
1967         return 0;
1968
1969 }
1970
1971 /**
1972  * amdgpu_vm_clear_moved - clear moved BOs in the PT
1973  *
1974  * @adev: amdgpu_device pointer
1975  * @vm: requested vm
1976  * @sync: sync object to add the last page table update fence to
1977  * Make sure all moved BOs are cleared in the PT.
1978  * Returns 0 for success.
1979  *
1980  * PTs have to be reserved and mutex must be locked!
1981  */
1982 int amdgpu_vm_clear_moved(struct amdgpu_device *adev, struct amdgpu_vm *vm,
1983                             struct amdgpu_sync *sync)
1984 {
1985         struct amdgpu_bo_va *bo_va = NULL;
1986         int r = 0;
1987
1988         spin_lock(&vm->status_lock);
1989         while (!list_empty(&vm->moved)) {
1990                 bo_va = list_first_entry(&vm->moved,
1991                         struct amdgpu_bo_va, base.vm_status);
1992                 spin_unlock(&vm->status_lock);
1993
1994                 r = amdgpu_vm_bo_update(adev, bo_va, true);
1995                 if (r)
1996                         return r;
1997
1998                 spin_lock(&vm->status_lock);
1999         }
2000         spin_unlock(&vm->status_lock);
2001
2002         if (bo_va)
2003                 r = amdgpu_sync_fence(adev, sync, bo_va->last_pt_update);
2004
2005         return r;
2006 }
2007
2008 /**
2009  * amdgpu_vm_bo_add - add a bo to a specific vm
2010  *
2011  * @adev: amdgpu_device pointer
2012  * @vm: requested vm
2013  * @bo: amdgpu buffer object
2014  *
2015  * Add @bo into the requested vm and add it to the list of
2016  * BOs associated with the vm.
2017  * Returns newly added bo_va or NULL for failure
2018  *
2019  * Object has to be reserved!
2020  */
2021 struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
2022                                       struct amdgpu_vm *vm,
2023                                       struct amdgpu_bo *bo)
2024 {
2025         struct amdgpu_bo_va *bo_va;
2026
2027         bo_va = kzalloc(sizeof(struct amdgpu_bo_va), GFP_KERNEL);
2028         if (bo_va == NULL) {
2029                 return NULL;
2030         }
2031         bo_va->base.vm = vm;
2032         bo_va->base.bo = bo;
2033         INIT_LIST_HEAD(&bo_va->base.bo_list);
2034         INIT_LIST_HEAD(&bo_va->base.vm_status);
2035
2036         bo_va->ref_count = 1;
2037         INIT_LIST_HEAD(&bo_va->valids);
2038         INIT_LIST_HEAD(&bo_va->invalids);
2039
2040         if (bo)
2041                 list_add_tail(&bo_va->base.bo_list, &bo->va);
2042
2043         return bo_va;
2044 }
2045
2046 /**
2047  * amdgpu_vm_bo_map - map bo inside a vm
2048  *
2049  * @adev: amdgpu_device pointer
2050  * @bo_va: bo_va to store the address
2051  * @saddr: where to map the BO
2052  * @offset: requested offset in the BO
2053  * @flags: attributes of pages (read/write/valid/etc.)
2054  *
2055  * Add a mapping of the BO at the specified addr into the VM.
2056  * Returns 0 for success, error for failure.
2057  *
2058  * Object has to be reserved and unreserved outside!
2059  */
2060 int amdgpu_vm_bo_map(struct amdgpu_device *adev,
2061                      struct amdgpu_bo_va *bo_va,
2062                      uint64_t saddr, uint64_t offset,
2063                      uint64_t size, uint64_t flags)
2064 {
2065         struct amdgpu_bo_va_mapping *mapping, *tmp;
2066         struct amdgpu_bo *bo = bo_va->base.bo;
2067         struct amdgpu_vm *vm = bo_va->base.vm;
2068         uint64_t eaddr;
2069
2070         /* validate the parameters */
2071         if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
2072             size == 0 || size & AMDGPU_GPU_PAGE_MASK)
2073                 return -EINVAL;
2074
2075         /* make sure object fit at this offset */
2076         eaddr = saddr + size - 1;
2077         if (saddr >= eaddr ||
2078             (bo && offset + size > amdgpu_bo_size(bo)))
2079                 return -EINVAL;
2080
2081         saddr /= AMDGPU_GPU_PAGE_SIZE;
2082         eaddr /= AMDGPU_GPU_PAGE_SIZE;
2083
2084         tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
2085         if (tmp) {
2086                 /* bo and tmp overlap, invalid addr */
2087                 dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
2088                         "0x%010Lx-0x%010Lx\n", bo, saddr, eaddr,
2089                         tmp->start, tmp->last + 1);
2090                 return -EINVAL;
2091         }
2092
2093         mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
2094         if (!mapping)
2095                 return -ENOMEM;
2096
2097         INIT_LIST_HEAD(&mapping->list);
2098         mapping->start = saddr;
2099         mapping->last = eaddr;
2100         mapping->offset = offset;
2101         mapping->flags = flags;
2102
2103         list_add(&mapping->list, &bo_va->invalids);
2104         amdgpu_vm_it_insert(mapping, &vm->va);
2105
2106         if (flags & AMDGPU_PTE_PRT)
2107                 amdgpu_vm_prt_get(adev);
2108         trace_amdgpu_vm_bo_map(bo_va, mapping);
2109
2110         return 0;
2111 }
2112
2113 /**
2114  * amdgpu_vm_bo_replace_map - map bo inside a vm, replacing existing mappings
2115  *
2116  * @adev: amdgpu_device pointer
2117  * @bo_va: bo_va to store the address
2118  * @saddr: where to map the BO
2119  * @offset: requested offset in the BO
2120  * @flags: attributes of pages (read/write/valid/etc.)
2121  *
2122  * Add a mapping of the BO at the specified addr into the VM. Replace existing
2123  * mappings as we do so.
2124  * Returns 0 for success, error for failure.
2125  *
2126  * Object has to be reserved and unreserved outside!
2127  */
2128 int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
2129                              struct amdgpu_bo_va *bo_va,
2130                              uint64_t saddr, uint64_t offset,
2131                              uint64_t size, uint64_t flags)
2132 {
2133         struct amdgpu_bo_va_mapping *mapping;
2134         struct amdgpu_bo *bo = bo_va->base.bo;
2135         struct amdgpu_vm *vm = bo_va->base.vm;
2136         uint64_t eaddr;
2137         int r;
2138
2139         /* validate the parameters */
2140         if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
2141             size == 0 || size & AMDGPU_GPU_PAGE_MASK)
2142                 return -EINVAL;
2143
2144         /* make sure object fit at this offset */
2145         eaddr = saddr + size - 1;
2146         if (saddr >= eaddr ||
2147             (bo && offset + size > amdgpu_bo_size(bo)))
2148                 return -EINVAL;
2149
2150         /* Allocate all the needed memory */
2151         mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
2152         if (!mapping)
2153                 return -ENOMEM;
2154
2155         r = amdgpu_vm_bo_clear_mappings(adev, bo_va->base.vm, saddr, size);
2156         if (r) {
2157                 kfree(mapping);
2158                 return r;
2159         }
2160
2161         saddr /= AMDGPU_GPU_PAGE_SIZE;
2162         eaddr /= AMDGPU_GPU_PAGE_SIZE;
2163
2164         mapping->start = saddr;
2165         mapping->last = eaddr;
2166         mapping->offset = offset;
2167         mapping->flags = flags;
2168
2169         list_add(&mapping->list, &bo_va->invalids);
2170         amdgpu_vm_it_insert(mapping, &vm->va);
2171
2172         if (flags & AMDGPU_PTE_PRT)
2173                 amdgpu_vm_prt_get(adev);
2174         trace_amdgpu_vm_bo_map(bo_va, mapping);
2175
2176         return 0;
2177 }
2178
2179 /**
2180  * amdgpu_vm_bo_unmap - remove bo mapping from vm
2181  *
2182  * @adev: amdgpu_device pointer
2183  * @bo_va: bo_va to remove the address from
2184  * @saddr: where the BO is mapped
2185  *
2186  * Remove a mapping of the BO at the specified addr from the VM.
2187  * Returns 0 for success, error for failure.
2188  *
2189  * Object has to be reserved and unreserved outside!
2190  */
2191 int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
2192                        struct amdgpu_bo_va *bo_va,
2193                        uint64_t saddr)
2194 {
2195         struct amdgpu_bo_va_mapping *mapping;
2196         struct amdgpu_vm *vm = bo_va->base.vm;
2197         bool valid = true;
2198
2199         saddr /= AMDGPU_GPU_PAGE_SIZE;
2200
2201         list_for_each_entry(mapping, &bo_va->valids, list) {
2202                 if (mapping->start == saddr)
2203                         break;
2204         }
2205
2206         if (&mapping->list == &bo_va->valids) {
2207                 valid = false;
2208
2209                 list_for_each_entry(mapping, &bo_va->invalids, list) {
2210                         if (mapping->start == saddr)
2211                                 break;
2212                 }
2213
2214                 if (&mapping->list == &bo_va->invalids)
2215                         return -ENOENT;
2216         }
2217
2218         list_del(&mapping->list);
2219         amdgpu_vm_it_remove(mapping, &vm->va);
2220         trace_amdgpu_vm_bo_unmap(bo_va, mapping);
2221
2222         if (valid)
2223                 list_add(&mapping->list, &vm->freed);
2224         else
2225                 amdgpu_vm_free_mapping(adev, vm, mapping,
2226                                        bo_va->last_pt_update);
2227
2228         return 0;
2229 }
2230
2231 /**
2232  * amdgpu_vm_bo_clear_mappings - remove all mappings in a specific range
2233  *
2234  * @adev: amdgpu_device pointer
2235  * @vm: VM structure to use
2236  * @saddr: start of the range
2237  * @size: size of the range
2238  *
2239  * Remove all mappings in a range, split them as appropriate.
2240  * Returns 0 for success, error for failure.
2241  */
2242 int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
2243                                 struct amdgpu_vm *vm,
2244                                 uint64_t saddr, uint64_t size)
2245 {
2246         struct amdgpu_bo_va_mapping *before, *after, *tmp, *next;
2247         LIST_HEAD(removed);
2248         uint64_t eaddr;
2249
2250         eaddr = saddr + size - 1;
2251         saddr /= AMDGPU_GPU_PAGE_SIZE;
2252         eaddr /= AMDGPU_GPU_PAGE_SIZE;
2253
2254         /* Allocate all the needed memory */
2255         before = kzalloc(sizeof(*before), GFP_KERNEL);
2256         if (!before)
2257                 return -ENOMEM;
2258         INIT_LIST_HEAD(&before->list);
2259
2260         after = kzalloc(sizeof(*after), GFP_KERNEL);
2261         if (!after) {
2262                 kfree(before);
2263                 return -ENOMEM;
2264         }
2265         INIT_LIST_HEAD(&after->list);
2266
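         /*
          * Editorial illustration (not in the original source): a mapping tmp
          * that overlaps the cleared range [saddr, eaddr] is handled as
          *
          *   [tmp->start .. saddr-1]   -> preserved as "before"
          *   [saddr      .. eaddr  ]   -> trimmed and moved to vm->freed
          *   [eaddr+1    .. tmp->last] -> preserved as "after"
          *
          * Mappings that lie entirely inside the range are simply moved to
          * vm->freed.
          */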
2267         /* Now gather all removed mappings */
2268         tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
2269         while (tmp) {
2270                 /* Remember mapping split at the start */
2271                 if (tmp->start < saddr) {
2272                         before->start = tmp->start;
2273                         before->last = saddr - 1;
2274                         before->offset = tmp->offset;
2275                         before->flags = tmp->flags;
2276                         list_add(&before->list, &tmp->list);
2277                 }
2278
2279                 /* Remember mapping split at the end */
2280                 if (tmp->last > eaddr) {
2281                         after->start = eaddr + 1;
2282                         after->last = tmp->last;
2283                         after->offset = tmp->offset;
2284                         after->offset += after->start - tmp->start;
2285                         after->flags = tmp->flags;
2286                         list_add(&after->list, &tmp->list);
2287                 }
2288
2289                 list_del(&tmp->list);
2290                 list_add(&tmp->list, &removed);
2291
2292                 tmp = amdgpu_vm_it_iter_next(tmp, saddr, eaddr);
2293         }
2294
2295         /* And free them up */
2296         list_for_each_entry_safe(tmp, next, &removed, list) {
2297                 amdgpu_vm_it_remove(tmp, &vm->va);
2298                 list_del(&tmp->list);
2299
2300                 if (tmp->start < saddr)
2301                         tmp->start = saddr;
2302                 if (tmp->last > eaddr)
2303                         tmp->last = eaddr;
2304
2305                 list_add(&tmp->list, &vm->freed);
2306                 trace_amdgpu_vm_bo_unmap(NULL, tmp);
2307         }
2308
2309         /* Insert partial mapping before the range */
2310         if (!list_empty(&before->list)) {
2311                 amdgpu_vm_it_insert(before, &vm->va);
2312                 if (before->flags & AMDGPU_PTE_PRT)
2313                         amdgpu_vm_prt_get(adev);
2314         } else {
2315                 kfree(before);
2316         }
2317
2318         /* Insert partial mapping after the range */
2319         if (!list_empty(&after->list)) {
2320                 amdgpu_vm_it_insert(after, &vm->va);
2321                 if (after->flags & AMDGPU_PTE_PRT)
2322                         amdgpu_vm_prt_get(adev);
2323         } else {
2324                 kfree(after);
2325         }
2326
2327         return 0;
2328 }
2329
2330 /**
2331  * amdgpu_vm_bo_rmv - remove a bo from a specific vm
2332  *
2333  * @adev: amdgpu_device pointer
2334  * @bo_va: requested bo_va
2335  *
2336  * Remove @bo_va->bo from the requested vm.
2337  *
2338  * Object has to be reserved!
2339  */
2340 void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
2341                       struct amdgpu_bo_va *bo_va)
2342 {
2343         struct amdgpu_bo_va_mapping *mapping, *next;
2344         struct amdgpu_vm *vm = bo_va->base.vm;
2345
2346         list_del(&bo_va->base.bo_list);
2347
2348         spin_lock(&vm->status_lock);
2349         list_del(&bo_va->base.vm_status);
2350         spin_unlock(&vm->status_lock);
2351
2352         list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
2353                 list_del(&mapping->list);
2354                 amdgpu_vm_it_remove(mapping, &vm->va);
2355                 trace_amdgpu_vm_bo_unmap(bo_va, mapping);
2356                 list_add(&mapping->list, &vm->freed);
2357         }
2358         list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
2359                 list_del(&mapping->list);
2360                 amdgpu_vm_it_remove(mapping, &vm->va);
2361                 amdgpu_vm_free_mapping(adev, vm, mapping,
2362                                        bo_va->last_pt_update);
2363         }
2364
2365         dma_fence_put(bo_va->last_pt_update);
2366         kfree(bo_va);
2367 }
2368
2369 /**
2370  * amdgpu_vm_bo_invalidate - mark the bo as invalid
2371  *
2372  * @adev: amdgpu_device pointer
2373  * @bo: amdgpu buffer object
2374  * @evicted: is the BO evicted
2375  *
2376  * Mark @bo as invalid.
2377  */
2378 void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
2379                              struct amdgpu_bo *bo, bool evicted)
2380 {
2381         struct amdgpu_vm_bo_base *bo_base;
2382
2383         list_for_each_entry(bo_base, &bo->va, bo_list) {
2384                 struct amdgpu_vm *vm = bo_base->vm;
2385
2386                 bo_base->moved = true;
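                     /*
                      * Editorial note: BOs that share the reservation object
                      * of the root PD (page tables and per-VM BOs) are tracked
                      * on the per-VM evicted list when they are evicted, so
                      * they can be validated again before the next command
                      * submission.
                      */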
2387                 if (evicted && bo->tbo.resv == vm->root.base.bo->tbo.resv) {
2388                         spin_lock(&bo_base->vm->status_lock);
2389                         list_move(&bo_base->vm_status, &vm->evicted);
2390                         spin_unlock(&bo_base->vm->status_lock);
2391                         continue;
2392                 }
2393
2394                 /* Don't add page tables to the moved state */
2395                 if (bo->tbo.type == ttm_bo_type_kernel)
2396                         continue;
2397
2398                 spin_lock(&bo_base->vm->status_lock);
2399                 list_move(&bo_base->vm_status, &bo_base->vm->moved);
2400                 spin_unlock(&bo_base->vm->status_lock);
2401         }
2402 }
2403
2404 static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size)
2405 {
2406         /* Total bits covered by PD + PTs */
2407         unsigned bits = ilog2(vm_size) + 18;
2408
2409         /* Make sure the PD is 4K in size up to 8GB of address space.
2410          * Above that, split the bits equally between PD and PTs. */
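         /*
          * Editorial example (worked from the formula above): vm_size = 8 (GB)
          * gives bits = 3 + 18 = 21 and a block size of 21 - 9 = 12, leaving
          * 9 bits for the PD (512 entries * 8 bytes = 4KB).  vm_size = 64 (GB)
          * gives bits = 24 and a block size of (24 + 3) / 2 = 13.
          */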
2411         if (vm_size <= 8)
2412                 return (bits - 9);
2413         else
2414                 return ((bits + 3) / 2);
2415 }
2416
2417 /**
2418  * amdgpu_vm_set_fragment_size - adjust fragment size in PTE
2419  *
2420  * @adev: amdgpu_device pointer
2421  * @fragment_size_default: the default fragment size if it's set auto
2422  */
2423 void amdgpu_vm_set_fragment_size(struct amdgpu_device *adev, uint32_t fragment_size_default)
2424 {
2425         if (amdgpu_vm_fragment_size == -1)
2426                 adev->vm_manager.fragment_size = fragment_size_default;
2427         else
2428                 adev->vm_manager.fragment_size = amdgpu_vm_fragment_size;
2429 }
2430
2431 /**
2432  * amdgpu_vm_adjust_size - adjust vm size, block size and fragment size
2433  *
2434  * @adev: amdgpu_device pointer
2435  * @vm_size: the default vm size if it's set auto
2436  */
2437 void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint64_t vm_size, uint32_t fragment_size_default)
2438 {
2439         /* adjust the vm size first */
2440         if (amdgpu_vm_size == -1)
2441                 adev->vm_manager.vm_size = vm_size;
2442         else
2443                 adev->vm_manager.vm_size = amdgpu_vm_size;
2444
2445         /* block size depends on vm size */
2446         if (amdgpu_vm_block_size == -1)
2447                 adev->vm_manager.block_size =
2448                         amdgpu_vm_get_block_size(adev->vm_manager.vm_size);
2449         else
2450                 adev->vm_manager.block_size = amdgpu_vm_block_size;
2451
2452         amdgpu_vm_set_fragment_size(adev, fragment_size_default);
2453
2454         DRM_INFO("vm size is %llu GB, block size is %u-bit, fragment size is %u-bit\n",
2455                 adev->vm_manager.vm_size, adev->vm_manager.block_size,
2456                 adev->vm_manager.fragment_size);
2457 }
2458
2459 /**
2460  * amdgpu_vm_init - initialize a vm instance
2461  *
2462  * @adev: amdgpu_device pointer
2463  * @vm: requested vm
2464  * @vm_context: Indicates whether it is a GFX or Compute context
2465  *
2466  * Init @vm fields.
2467  */
2468 int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
2469                    int vm_context)
2470 {
2471         const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE,
2472                 AMDGPU_VM_PTE_COUNT(adev) * 8);
2473         unsigned ring_instance;
2474         struct amdgpu_ring *ring;
2475         struct amd_sched_rq *rq;
2476         int r, i;
2477         u64 flags;
2478         uint64_t init_pde_value = 0;
2479
2480         vm->va = RB_ROOT;
2481         vm->client_id = atomic64_inc_return(&adev->vm_manager.client_counter);
2482         for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
2483                 vm->reserved_vmid[i] = NULL;
2484         spin_lock_init(&vm->status_lock);
2485         INIT_LIST_HEAD(&vm->evicted);
2486         INIT_LIST_HEAD(&vm->moved);
2487         INIT_LIST_HEAD(&vm->freed);
2488
2489         /* create scheduler entity for page table updates */
2490
2491         ring_instance = atomic_inc_return(&adev->vm_manager.vm_pte_next_ring);
2492         ring_instance %= adev->vm_manager.vm_pte_num_rings;
2493         ring = adev->vm_manager.vm_pte_rings[ring_instance];
2494         rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_KERNEL];
2495         r = amd_sched_entity_init(&ring->sched, &vm->entity,
2496                                   rq, amdgpu_sched_jobs);
2497         if (r)
2498                 return r;
2499
2500         vm->pte_support_ats = false;
2501
2502         if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE) {
2503                 vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
2504                                                 AMDGPU_VM_USE_CPU_FOR_COMPUTE);
2505
2506                 if (adev->asic_type == CHIP_RAVEN) {
2507                         vm->pte_support_ats = true;
2508                         init_pde_value = AMDGPU_PTE_SYSTEM | AMDGPU_PDE_PTE;
2509                 }
2510         } else
2511                 vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
2512                                                 AMDGPU_VM_USE_CPU_FOR_GFX);
2513         DRM_DEBUG_DRIVER("VM update mode is %s\n",
2514                          vm->use_cpu_for_update ? "CPU" : "SDMA");
2515         WARN_ONCE((vm->use_cpu_for_update & !amdgpu_vm_is_large_bar(adev)),
2516                   "CPU update of VM recommended only for large BAR system\n");
2517         vm->last_dir_update = NULL;
2518
2519         flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
2520                         AMDGPU_GEM_CREATE_VRAM_CLEARED;
2521         if (vm->use_cpu_for_update)
2522                 flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
2523         else
2524                 flags |= (AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
2525                                 AMDGPU_GEM_CREATE_SHADOW);
2526
2527         r = amdgpu_bo_create(adev, amdgpu_vm_bo_size(adev, 0), align, true,
2528                              AMDGPU_GEM_DOMAIN_VRAM,
2529                              flags,
2530                              NULL, NULL, init_pde_value, &vm->root.base.bo);
2531         if (r)
2532                 goto error_free_sched_entity;
2533
2534         vm->root.base.vm = vm;
2535         list_add_tail(&vm->root.base.bo_list, &vm->root.base.bo->va);
2536         INIT_LIST_HEAD(&vm->root.base.vm_status);
2537
2538         if (vm->use_cpu_for_update) {
2539                 r = amdgpu_bo_reserve(vm->root.base.bo, false);
2540                 if (r)
2541                         goto error_free_root;
2542
2543                 r = amdgpu_bo_kmap(vm->root.base.bo, NULL);
2544                 if (r)
2545                         goto error_free_root;
2546                 amdgpu_bo_unreserve(vm->root.base.bo);
2547         }
2548
2549         return 0;
2550
2551 error_free_root:
2552         amdgpu_bo_unref(&vm->root.base.bo->shadow);
2553         amdgpu_bo_unref(&vm->root.base.bo);
2554         vm->root.base.bo = NULL;
2555
2556 error_free_sched_entity:
2557         amd_sched_entity_fini(&ring->sched, &vm->entity);
2558
2559         return r;
2560 }
2561
2562 /**
2563  * amdgpu_vm_free_levels - free PD/PT levels
2564  *
2565  * @level: PD/PT starting level to free
2566  *
2567  * Free the page directory or page table level and all sub levels.
2568  */
2569 static void amdgpu_vm_free_levels(struct amdgpu_vm_pt *level)
2570 {
2571         unsigned i;
2572
2573         if (level->base.bo) {
2574                 list_del(&level->base.bo_list);
2575                 list_del(&level->base.vm_status);
2576                 amdgpu_bo_unref(&level->base.bo->shadow);
2577                 amdgpu_bo_unref(&level->base.bo);
2578         }
2579
2580         if (level->entries)
2581                 for (i = 0; i <= level->last_entry_used; i++)
2582                         amdgpu_vm_free_levels(&level->entries[i]);
2583
2584         kvfree(level->entries);
2585 }
2586
2587 /**
2588  * amdgpu_vm_fini - tear down a vm instance
2589  *
2590  * @adev: amdgpu_device pointer
2591  * @vm: requested vm
2592  *
2593  * Tear down @vm.
2594  * Unbind the VM and remove all bos from the vm bo list
2595  */
2596 void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
2597 {
2598         struct amdgpu_bo_va_mapping *mapping, *tmp;
2599         bool prt_fini_needed = !!adev->gart.gart_funcs->set_prt;
2600         int i;
2601
2602         amd_sched_entity_fini(vm->entity.sched, &vm->entity);
2603
2604         if (!RB_EMPTY_ROOT(&vm->va)) {
2605                 dev_err(adev->dev, "still active bo inside vm\n");
2606         }
2607         rbtree_postorder_for_each_entry_safe(mapping, tmp, &vm->va, rb) {
2608                 list_del(&mapping->list);
2609                 amdgpu_vm_it_remove(mapping, &vm->va);
2610                 kfree(mapping);
2611         }
2612         list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
2613                 if (mapping->flags & AMDGPU_PTE_PRT && prt_fini_needed) {
2614                         amdgpu_vm_prt_fini(adev, vm);
2615                         prt_fini_needed = false;
2616                 }
2617
2618                 list_del(&mapping->list);
2619                 amdgpu_vm_free_mapping(adev, vm, mapping, NULL);
2620         }
2621
2622         amdgpu_vm_free_levels(&vm->root);
2623         dma_fence_put(vm->last_dir_update);
2624         for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
2625                 amdgpu_vm_free_reserved_vmid(adev, vm, i);
2626 }
2627
2628 /**
2629  * amdgpu_vm_manager_init - init the VM manager
2630  *
2631  * @adev: amdgpu_device pointer
2632  *
2633  * Initialize the VM manager structures
2634  */
2635 void amdgpu_vm_manager_init(struct amdgpu_device *adev)
2636 {
2637         unsigned i, j;
2638
2639         for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
2640                 struct amdgpu_vm_id_manager *id_mgr =
2641                         &adev->vm_manager.id_mgr[i];
2642
2643                 mutex_init(&id_mgr->lock);
2644                 INIT_LIST_HEAD(&id_mgr->ids_lru);
2645                 atomic_set(&id_mgr->reserved_vmid_num, 0);
2646
2647                 /* skip over VMID 0, since it is the system VM */
2648                 for (j = 1; j < id_mgr->num_ids; ++j) {
2649                         amdgpu_vm_reset_id(adev, i, j);
2650                         amdgpu_sync_create(&id_mgr->ids[j].active);
2651                         list_add_tail(&id_mgr->ids[j].list, &id_mgr->ids_lru);
2652                 }
2653         }
2654
2655         adev->vm_manager.fence_context =
2656                 dma_fence_context_alloc(AMDGPU_MAX_RINGS);
2657         for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
2658                 adev->vm_manager.seqno[i] = 0;
2659
2660         atomic_set(&adev->vm_manager.vm_pte_next_ring, 0);
2661         atomic64_set(&adev->vm_manager.client_counter, 0);
2662         spin_lock_init(&adev->vm_manager.prt_lock);
2663         atomic_set(&adev->vm_manager.num_prt_users, 0);
2664
2665         /* If not overridden by the user, by default, only in large BAR systems
2666          * Compute VM tables will be updated by CPU
2667          */
2668 #ifdef CONFIG_X86_64
2669         if (amdgpu_vm_update_mode == -1) {
2670                 if (amdgpu_vm_is_large_bar(adev))
2671                         adev->vm_manager.vm_update_mode =
2672                                 AMDGPU_VM_USE_CPU_FOR_COMPUTE;
2673                 else
2674                         adev->vm_manager.vm_update_mode = 0;
2675         } else
2676                 adev->vm_manager.vm_update_mode = amdgpu_vm_update_mode;
2677 #else
2678         adev->vm_manager.vm_update_mode = 0;
2679 #endif
2680
2681 }
2682
2683 /**
2684  * amdgpu_vm_manager_fini - cleanup VM manager
2685  *
2686  * @adev: amdgpu_device pointer
2687  *
2688  * Cleanup the VM manager and free resources.
2689  */
2690 void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
2691 {
2692         unsigned i, j;
2693
2694         for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
2695                 struct amdgpu_vm_id_manager *id_mgr =
2696                         &adev->vm_manager.id_mgr[i];
2697
2698                 mutex_destroy(&id_mgr->lock);
2699                 for (j = 0; j < AMDGPU_NUM_VM; ++j) {
2700                         struct amdgpu_vm_id *id = &id_mgr->ids[j];
2701
2702                         amdgpu_sync_free(&id->active);
2703                         dma_fence_put(id->flushed_updates);
2704                         dma_fence_put(id->last_flush);
2705                 }
2706         }
2707 }
2708
2709 int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
2710 {
2711         union drm_amdgpu_vm *args = data;
2712         struct amdgpu_device *adev = dev->dev_private;
2713         struct amdgpu_fpriv *fpriv = filp->driver_priv;
2714         int r;
2715
2716         switch (args->in.op) {
2717         case AMDGPU_VM_OP_RESERVE_VMID:
2718                 /* currently, we only need to reserve a VMID for the gfxhub */
2719                 r = amdgpu_vm_alloc_reserved_vmid(adev, &fpriv->vm,
2720                                                   AMDGPU_GFXHUB);
2721                 if (r)
2722                         return r;
2723                 break;
2724         case AMDGPU_VM_OP_UNRESERVE_VMID:
2725                 amdgpu_vm_free_reserved_vmid(adev, &fpriv->vm, AMDGPU_GFXHUB);
2726                 break;
2727         default:
2728                 return -EINVAL;
2729         }
2730
2731         return 0;
2732 }