1 /*
2  * Copyright 2008 Advanced Micro Devices, Inc.
3  * Copyright 2008 Red Hat Inc.
4  * Copyright 2009 Jerome Glisse.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors: Dave Airlie
25  *          Alex Deucher
26  *          Jerome Glisse
27  */
28 #include <linux/dma-fence-array.h>
29 #include <linux/interval_tree_generic.h>
30 #include <drm/drmP.h>
31 #include <drm/amdgpu_drm.h>
32 #include "amdgpu.h"
33 #include "amdgpu_trace.h"
34
35 /*
36  * GPUVM
37  * GPUVM is similar to the legacy gart on older asics, however
38  * rather than there being a single global gart table
39  * for the entire GPU, there are multiple VM page tables active
40  * at any given time.  The VM page tables can contain a mix of
41  * vram pages and system memory pages, and system memory pages
42  * can be mapped as snooped (cached system pages) or unsnooped
43  * (uncached system pages).
44  * Each VM has an ID associated with it and there is a page table
45  * associated with each VMID.  When executing a command buffer,
46  * the kernel tells the ring what VMID to use for that command
47  * buffer.  VMIDs are allocated dynamically as commands are submitted.
48  * The userspace drivers maintain their own address space and the kernel
49  * sets up their page tables accordingly when they submit their
50  * command buffers and a VMID is assigned.
51  * Cayman/Trinity support up to 8 active VMs at any given time;
52  * SI supports 16.
53  */
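
/*
 * Illustrative summary (not part of the original comment): per VM the
 * translation is a small radix tree, roughly
 *
 *	root page directory -> page directory(s) -> page table -> 4KB page
 *
 * and whenever a VM that owns a VMID is flushed (see amdgpu_vm_flush()
 * below), the GPU address of its root page directory is written into the
 * page table base registers of that VMID.
 */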
54
55 #define START(node) ((node)->start)
56 #define LAST(node) ((node)->last)
57
58 INTERVAL_TREE_DEFINE(struct amdgpu_bo_va_mapping, rb, uint64_t, __subtree_last,
59                      START, LAST, static, amdgpu_vm_it)
60
61 #undef START
62 #undef LAST
63
64 /* Local structure. Encapsulate some VM table update parameters to reduce
65  * the number of function parameters
66  */
67 struct amdgpu_pte_update_params {
68         /* amdgpu device we do this update for */
69         struct amdgpu_device *adev;
70         /* optional amdgpu_vm we do this update for */
71         struct amdgpu_vm *vm;
72         /* address where to copy page table entries from */
73         uint64_t src;
74         /* indirect buffer to fill with commands */
75         struct amdgpu_ib *ib;
76         /* Function which actually does the update */
77         void (*func)(struct amdgpu_pte_update_params *params, uint64_t pe,
78                      uint64_t addr, unsigned count, uint32_t incr,
79                      uint64_t flags);
80         /* The next two are used during VM update by CPU:
81          *  pages_addr - DMA addresses to use for mapping
82          *  kptr - kernel pointer of the PD/PT BO that needs to be updated
83          */
84         dma_addr_t *pages_addr;
85         void *kptr;
86 };
87
88 /* Helper to disable partial resident texture feature from a fence callback */
89 struct amdgpu_prt_cb {
90         struct amdgpu_device *adev;
91         struct dma_fence_cb cb;
92 };
93
94 /**
95  * amdgpu_vm_num_entries - return the number of entries in a PD/PT
96  *
97  * @adev: amdgpu_device pointer
98  *
99  * Calculate the number of entries in a page directory or page table.
100  */
101 static unsigned amdgpu_vm_num_entries(struct amdgpu_device *adev,
102                                       unsigned level)
103 {
104         if (level == 0)
105                 /* For the root directory */
106                 return adev->vm_manager.max_pfn >>
107                         (adev->vm_manager.block_size *
108                          adev->vm_manager.num_level);
109         else if (level == adev->vm_manager.num_level)
110                 /* For the page tables on the leaves */
111                 return AMDGPU_VM_PTE_COUNT(adev);
112         else
113                 /* Everything in between */
114                 return 1 << adev->vm_manager.block_size;
115 }
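
/*
 * Worked example (illustrative, assuming block_size == 9 and num_level == 3):
 * the root page directory then has max_pfn >> 27 entries, every intermediate
 * directory has 1 << 9 = 512 entries and each leaf page table holds
 * AMDGPU_VM_PTE_COUNT(adev) PTEs.
 */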
116
117 /**
118  * amdgpu_vm_bo_size - returns the size of the BOs in bytes
119  *
120  * @adev: amdgpu_device pointer
121  *
122  * Calculate the size of the BO for a page directory or page table in bytes.
123  */
124 static unsigned amdgpu_vm_bo_size(struct amdgpu_device *adev, unsigned level)
125 {
126         return AMDGPU_GPU_PAGE_ALIGN(amdgpu_vm_num_entries(adev, level) * 8);
127 }
128
129 /**
130  * amdgpu_vm_get_pd_bo - add the VM PD to a validation list
131  *
132  * @vm: vm providing the BOs
133  * @validated: head of validation list
134  * @entry: entry to add
135  *
136  * Add the page directory to the list of BOs to
137  * validate for command submission.
138  */
139 void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
140                          struct list_head *validated,
141                          struct amdgpu_bo_list_entry *entry)
142 {
143         entry->robj = vm->root.bo;
144         entry->priority = 0;
145         entry->tv.bo = &entry->robj->tbo;
146         entry->tv.shared = true;
147         entry->user_pages = NULL;
148         list_add(&entry->tv.head, validated);
149 }
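
/*
 * Minimal usage sketch (illustrative only, the local names are hypothetical):
 * gather the root PD into a validation list before reserving the BOs, e.g.
 *
 *	struct amdgpu_bo_list_entry pd_entry;
 *	struct list_head validated;
 *
 *	INIT_LIST_HEAD(&validated);
 *	amdgpu_vm_get_pd_bo(vm, &validated, &pd_entry);
 *	r = ttm_eu_reserve_buffers(&ticket, &validated, true, NULL);
 */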
150
151 /**
152  * amdgpu_vm_validate_level - validate a single page table level
153  *
154  * @parent: parent page table level
155  * @validate: callback to do the validation
156  * @param: parameter for the validation callback
157  *
158  * Validate the page table BOs on command submission if necessary.
159  */
160 static int amdgpu_vm_validate_level(struct amdgpu_vm_pt *parent,
161                                     int (*validate)(void *, struct amdgpu_bo *),
162                                     void *param, bool use_cpu_for_update,
163                                     struct ttm_bo_global *glob)
164 {
165         unsigned i;
166         int r = 0;
167
168         if (use_cpu_for_update) {
169                 r = amdgpu_bo_kmap(parent->bo, NULL);
170                 if (r)
171                         return r;
172         }
173
174         if (!parent->entries)
175                 return 0;
176
177         for (i = 0; i <= parent->last_entry_used; ++i) {
178                 struct amdgpu_vm_pt *entry = &parent->entries[i];
179
180                 if (!entry->bo)
181                         continue;
182
183                 r = validate(param, entry->bo);
184                 if (r)
185                         return r;
186
187                 spin_lock(&glob->lru_lock);
188                 ttm_bo_move_to_lru_tail(&entry->bo->tbo);
189                 if (entry->bo->shadow)
190                         ttm_bo_move_to_lru_tail(&entry->bo->shadow->tbo);
191                 spin_unlock(&glob->lru_lock);
192
193                 /*
194                  * Recurse into the subdirectory. This is harmless because we
195                  * have only a maximum of 5 layers.
196                  */
197                 r = amdgpu_vm_validate_level(entry, validate, param,
198                                              use_cpu_for_update, glob);
199                 if (r)
200                         return r;
201         }
202
203         return r;
204 }
205
206 /**
207  * amdgpu_vm_validate_pt_bos - validate the page table BOs
208  *
209  * @adev: amdgpu device pointer
210  * @vm: vm providing the BOs
211  * @validate: callback to do the validation
212  * @param: parameter for the validation callback
213  *
214  * Validate the page table BOs on command submission if necessary.
215  */
216 int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
217                               int (*validate)(void *p, struct amdgpu_bo *bo),
218                               void *param)
219 {
220         uint64_t num_evictions;
221
222         /* We only need to validate the page tables
223          * if they aren't already valid.
224          */
225         num_evictions = atomic64_read(&adev->num_evictions);
226         if (num_evictions == vm->last_eviction_counter)
227                 return 0;
228
229         return amdgpu_vm_validate_level(&vm->root, validate, param,
230                                         vm->use_cpu_for_update,
231                                         adev->mman.bdev.glob);
232 }
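
/*
 * Illustrative sketch only (example_validate is a hypothetical name, the
 * real CS code supplies its own callback): every page table BO that still
 * needs validation is handed to the callback, roughly like
 *
 *	static int example_validate(void *param, struct amdgpu_bo *bo)
 *	{
 *		return 0;	<- validate or move @bo back to VRAM here
 *	}
 *
 *	r = amdgpu_vm_validate_pt_bos(adev, vm, example_validate, NULL);
 */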
233
234 /**
235  * amdgpu_vm_alloc_levels - allocate the PD/PT levels
236  *
237  * @adev: amdgpu_device pointer
238  * @vm: requested vm
239  * @saddr: start of the address range
240  * @eaddr: end of the address range
241  *
242  * Make sure the page directories and page tables are allocated
243  */
244 static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
245                                   struct amdgpu_vm *vm,
246                                   struct amdgpu_vm_pt *parent,
247                                   uint64_t saddr, uint64_t eaddr,
248                                   unsigned level)
249 {
250         unsigned shift = (adev->vm_manager.num_level - level) *
251                 adev->vm_manager.block_size;
252         unsigned pt_idx, from, to;
253         int r;
254         u64 flags;
255         uint64_t init_value = 0;
256
257         if (!parent->entries) {
258                 unsigned num_entries = amdgpu_vm_num_entries(adev, level);
259
260                 parent->entries = kvmalloc_array(num_entries,
261                                                    sizeof(struct amdgpu_vm_pt),
262                                                    GFP_KERNEL | __GFP_ZERO);
263                 if (!parent->entries)
264                         return -ENOMEM;
265                 memset(parent->entries, 0, num_entries * sizeof(struct amdgpu_vm_pt));
266         }
267
268         from = saddr >> shift;
269         to = eaddr >> shift;
270         if (from >= amdgpu_vm_num_entries(adev, level) ||
271             to >= amdgpu_vm_num_entries(adev, level))
272                 return -EINVAL;
273
274         if (to > parent->last_entry_used)
275                 parent->last_entry_used = to;
276
277         ++level;
278         saddr = saddr & ((1 << shift) - 1);
279         eaddr = eaddr & ((1 << shift) - 1);
280
281         flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
282                         AMDGPU_GEM_CREATE_VRAM_CLEARED;
283         if (vm->use_cpu_for_update)
284                 flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
285         else
286                 flags |= (AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
287                                 AMDGPU_GEM_CREATE_SHADOW);
288
289         if (vm->pte_support_ats) {
290                 init_value = AMDGPU_PTE_SYSTEM;
291                 if (level != adev->vm_manager.num_level - 1)
292                         init_value |= AMDGPU_PDE_PTE;
293         }
294
295         /* walk over the address space and allocate the page tables */
296         for (pt_idx = from; pt_idx <= to; ++pt_idx) {
297                 struct reservation_object *resv = vm->root.bo->tbo.resv;
298                 struct amdgpu_vm_pt *entry = &parent->entries[pt_idx];
299                 struct amdgpu_bo *pt;
300
301                 if (!entry->bo) {
302                         r = amdgpu_bo_create(adev,
303                                              amdgpu_vm_bo_size(adev, level),
304                                              AMDGPU_GPU_PAGE_SIZE, true,
305                                              AMDGPU_GEM_DOMAIN_VRAM,
306                                              flags,
307                                              NULL, resv, init_value, &pt);
308                         if (r)
309                                 return r;
310
311                         if (vm->use_cpu_for_update) {
312                                 r = amdgpu_bo_kmap(pt, NULL);
313                                 if (r) {
314                                         amdgpu_bo_unref(&pt);
315                                         return r;
316                                 }
317                         }
318
319                         /* Keep a reference to the root directory to avoid
320                          * freeing it up in the wrong order.
321                          */
322                         pt->parent = amdgpu_bo_ref(vm->root.bo);
323
324                         entry->bo = pt;
325                         entry->addr = 0;
326                 }
327
328                 if (level < adev->vm_manager.num_level) {
329                         uint64_t sub_saddr = (pt_idx == from) ? saddr : 0;
330                         uint64_t sub_eaddr = (pt_idx == to) ? eaddr :
331                                 ((1 << shift) - 1);
332                         r = amdgpu_vm_alloc_levels(adev, vm, entry, sub_saddr,
333                                                    sub_eaddr, level);
334                         if (r)
335                                 return r;
336                 }
337         }
338
339         return 0;
340 }
341
342 /**
343  * amdgpu_vm_alloc_pts - Allocate page tables.
344  *
345  * @adev: amdgpu_device pointer
346  * @vm: VM to allocate page tables for
347  * @saddr: Start address which needs to be allocated
348  * @size: Size from start address we need.
349  *
350  * Make sure the page tables are allocated.
351  */
352 int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
353                         struct amdgpu_vm *vm,
354                         uint64_t saddr, uint64_t size)
355 {
356         uint64_t last_pfn;
357         uint64_t eaddr;
358
359         /* validate the parameters */
360         if (saddr & AMDGPU_GPU_PAGE_MASK || size & AMDGPU_GPU_PAGE_MASK)
361                 return -EINVAL;
362
363         eaddr = saddr + size - 1;
364         last_pfn = eaddr / AMDGPU_GPU_PAGE_SIZE;
365         if (last_pfn >= adev->vm_manager.max_pfn) {
366                 dev_err(adev->dev, "va above limit (0x%08llX >= 0x%08llX)\n",
367                         last_pfn, adev->vm_manager.max_pfn);
368                 return -EINVAL;
369         }
370
371         saddr /= AMDGPU_GPU_PAGE_SIZE;
372         eaddr /= AMDGPU_GPU_PAGE_SIZE;
373
374         return amdgpu_vm_alloc_levels(adev, vm, &vm->root, saddr, eaddr, 0);
375 }
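
/*
 * Minimal usage sketch (illustrative values): make sure page tables exist for
 * a 1MiB range at GPU VA 0x100000 before its PTEs are filled in; address and
 * size are in bytes and must be GPU page aligned:
 *
 *	r = amdgpu_vm_alloc_pts(adev, vm, 0x100000, 0x100000);
 *	if (r)
 *		return r;
 */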
376
377 /**
378  * amdgpu_vm_had_gpu_reset - check if reset occurred since last use
379  *
380  * @adev: amdgpu_device pointer
381  * @id: VMID structure
382  *
383  * Check if GPU reset occurred since last use of the VMID.
384  */
385 static bool amdgpu_vm_had_gpu_reset(struct amdgpu_device *adev,
386                                     struct amdgpu_vm_id *id)
387 {
388         return id->current_gpu_reset_count !=
389                 atomic_read(&adev->gpu_reset_counter);
390 }
391
392 static bool amdgpu_vm_reserved_vmid_ready(struct amdgpu_vm *vm, unsigned vmhub)
393 {
394         return !!vm->reserved_vmid[vmhub];
395 }
396
397 /* id_mgr->lock must be held */
398 static int amdgpu_vm_grab_reserved_vmid_locked(struct amdgpu_vm *vm,
399                                                struct amdgpu_ring *ring,
400                                                struct amdgpu_sync *sync,
401                                                struct dma_fence *fence,
402                                                struct amdgpu_job *job)
403 {
404         struct amdgpu_device *adev = ring->adev;
405         unsigned vmhub = ring->funcs->vmhub;
406         uint64_t fence_context = adev->fence_context + ring->idx;
407         struct amdgpu_vm_id *id = vm->reserved_vmid[vmhub];
408         struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub];
409         struct dma_fence *updates = sync->last_vm_update;
410         int r = 0;
411         struct dma_fence *flushed, *tmp;
412         bool needs_flush = vm->use_cpu_for_update;
413
414         flushed  = id->flushed_updates;
415         if ((amdgpu_vm_had_gpu_reset(adev, id)) ||
416             (atomic64_read(&id->owner) != vm->client_id) ||
417             (job->vm_pd_addr != id->pd_gpu_addr) ||
418             (updates && (!flushed || updates->context != flushed->context ||
419                         dma_fence_is_later(updates, flushed))) ||
420             (!id->last_flush || (id->last_flush->context != fence_context &&
421                                  !dma_fence_is_signaled(id->last_flush)))) {
422                 needs_flush = true;
423                 /* to prevent one context being starved by another context */
424                 id->pd_gpu_addr = 0;
425                 tmp = amdgpu_sync_peek_fence(&id->active, ring);
426                 if (tmp) {
427                         r = amdgpu_sync_fence(adev, sync, tmp);
428                         return r;
429                 }
430         }
431
432         /* Good we can use this VMID. Remember this submission as
433          * user of the VMID.
434          */
435         r = amdgpu_sync_fence(ring->adev, &id->active, fence);
436         if (r)
437                 goto out;
438
439         if (updates && (!flushed || updates->context != flushed->context ||
440                         dma_fence_is_later(updates, flushed))) {
441                 dma_fence_put(id->flushed_updates);
442                 id->flushed_updates = dma_fence_get(updates);
443         }
444         id->pd_gpu_addr = job->vm_pd_addr;
445         atomic64_set(&id->owner, vm->client_id);
446         job->vm_needs_flush = needs_flush;
447         if (needs_flush) {
448                 dma_fence_put(id->last_flush);
449                 id->last_flush = NULL;
450         }
451         job->vm_id = id - id_mgr->ids;
452         trace_amdgpu_vm_grab_id(vm, ring, job);
453 out:
454         return r;
455 }
456
457 /**
458  * amdgpu_vm_grab_id - allocate the next free VMID
459  *
460  * @vm: vm to allocate id for
461  * @ring: ring we want to submit job to
462  * @sync: sync object where we add dependencies
463  * @fence: fence protecting ID from reuse
464  *
465  * Allocate an id for the vm, adding fences to the sync obj as necessary.
466  */
467 int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
468                       struct amdgpu_sync *sync, struct dma_fence *fence,
469                       struct amdgpu_job *job)
470 {
471         struct amdgpu_device *adev = ring->adev;
472         unsigned vmhub = ring->funcs->vmhub;
473         struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub];
474         uint64_t fence_context = adev->fence_context + ring->idx;
475         struct dma_fence *updates = sync->last_vm_update;
476         struct amdgpu_vm_id *id, *idle;
477         struct dma_fence **fences;
478         unsigned i;
479         int r = 0;
480
481         mutex_lock(&id_mgr->lock);
482         if (amdgpu_vm_reserved_vmid_ready(vm, vmhub)) {
483                 r = amdgpu_vm_grab_reserved_vmid_locked(vm, ring, sync, fence, job);
484                 mutex_unlock(&id_mgr->lock);
485                 return r;
486         }
487         fences = kmalloc_array(id_mgr->num_ids, sizeof(void *), GFP_KERNEL);
488         if (!fences) {
489                 mutex_unlock(&id_mgr->lock);
490                 return -ENOMEM;
491         }
492         /* Check if we have an idle VMID */
493         i = 0;
494         list_for_each_entry(idle, &id_mgr->ids_lru, list) {
495                 fences[i] = amdgpu_sync_peek_fence(&idle->active, ring);
496                 if (!fences[i])
497                         break;
498                 ++i;
499         }
500
501         /* If we can't find an idle VMID to use, wait till one becomes available */
502         if (&idle->list == &id_mgr->ids_lru) {
503                 u64 fence_context = adev->vm_manager.fence_context + ring->idx;
504                 unsigned seqno = ++adev->vm_manager.seqno[ring->idx];
505                 struct dma_fence_array *array;
506                 unsigned j;
507
508                 for (j = 0; j < i; ++j)
509                         dma_fence_get(fences[j]);
510
511                 array = dma_fence_array_create(i, fences, fence_context,
512                                            seqno, true);
513                 if (!array) {
514                         for (j = 0; j < i; ++j)
515                                 dma_fence_put(fences[j]);
516                         kfree(fences);
517                         r = -ENOMEM;
518                         goto error;
519                 }
520
521
522                 r = amdgpu_sync_fence(ring->adev, sync, &array->base);
523                 dma_fence_put(&array->base);
524                 if (r)
525                         goto error;
526
527                 mutex_unlock(&id_mgr->lock);
528                 return 0;
529
530         }
531         kfree(fences);
532
533         job->vm_needs_flush = vm->use_cpu_for_update;
534         /* Check if we can use a VMID already assigned to this VM */
535         list_for_each_entry_reverse(id, &id_mgr->ids_lru, list) {
536                 struct dma_fence *flushed;
537                 bool needs_flush = vm->use_cpu_for_update;
538
539                 /* Check all the prerequisites to using this VMID */
540                 if (amdgpu_vm_had_gpu_reset(adev, id))
541                         continue;
542
543                 if (atomic64_read(&id->owner) != vm->client_id)
544                         continue;
545
546                 if (job->vm_pd_addr != id->pd_gpu_addr)
547                         continue;
548
549                 if (!id->last_flush ||
550                     (id->last_flush->context != fence_context &&
551                      !dma_fence_is_signaled(id->last_flush)))
552                         needs_flush = true;
553
554                 flushed  = id->flushed_updates;
555                 if (updates && (!flushed || dma_fence_is_later(updates, flushed)))
556                         needs_flush = true;
557
558                 /* Concurrent flushes are only possible starting with Vega10 */
559                 if (adev->asic_type < CHIP_VEGA10 && needs_flush)
560                         continue;
561
562                 /* Good we can use this VMID. Remember this submission as
563                  * user of the VMID.
564                  */
565                 r = amdgpu_sync_fence(ring->adev, &id->active, fence);
566                 if (r)
567                         goto error;
568
569                 if (updates && (!flushed || dma_fence_is_later(updates, flushed))) {
570                         dma_fence_put(id->flushed_updates);
571                         id->flushed_updates = dma_fence_get(updates);
572                 }
573
574                 if (needs_flush)
575                         goto needs_flush;
576                 else
577                         goto no_flush_needed;
578
579         }
580
581         /* Still no ID to use? Then use the idle one found earlier */
582         id = idle;
583
584         /* Remember this submission as user of the VMID */
585         r = amdgpu_sync_fence(ring->adev, &id->active, fence);
586         if (r)
587                 goto error;
588
589         id->pd_gpu_addr = job->vm_pd_addr;
590         dma_fence_put(id->flushed_updates);
591         id->flushed_updates = dma_fence_get(updates);
592         atomic64_set(&id->owner, vm->client_id);
593
594 needs_flush:
595         job->vm_needs_flush = true;
596         dma_fence_put(id->last_flush);
597         id->last_flush = NULL;
598
599 no_flush_needed:
600         list_move_tail(&id->list, &id_mgr->ids_lru);
601
602         job->vm_id = id - id_mgr->ids;
603         trace_amdgpu_vm_grab_id(vm, ring, job);
604
605 error:
606         mutex_unlock(&id_mgr->lock);
607         return r;
608 }
609
610 static void amdgpu_vm_free_reserved_vmid(struct amdgpu_device *adev,
611                                           struct amdgpu_vm *vm,
612                                           unsigned vmhub)
613 {
614         struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub];
615
616         mutex_lock(&id_mgr->lock);
617         if (vm->reserved_vmid[vmhub]) {
618                 list_add(&vm->reserved_vmid[vmhub]->list,
619                         &id_mgr->ids_lru);
620                 vm->reserved_vmid[vmhub] = NULL;
621                 atomic_dec(&id_mgr->reserved_vmid_num);
622         }
623         mutex_unlock(&id_mgr->lock);
624 }
625
626 static int amdgpu_vm_alloc_reserved_vmid(struct amdgpu_device *adev,
627                                          struct amdgpu_vm *vm,
628                                          unsigned vmhub)
629 {
630         struct amdgpu_vm_id_manager *id_mgr;
631         struct amdgpu_vm_id *idle;
632         int r = 0;
633
634         id_mgr = &adev->vm_manager.id_mgr[vmhub];
635         mutex_lock(&id_mgr->lock);
636         if (vm->reserved_vmid[vmhub])
637                 goto unlock;
638         if (atomic_inc_return(&id_mgr->reserved_vmid_num) >
639             AMDGPU_VM_MAX_RESERVED_VMID) {
640                 DRM_ERROR("Over limitation of reserved vmid\n");
641                 atomic_dec(&id_mgr->reserved_vmid_num);
642                 r = -EINVAL;
643                 goto unlock;
644         }
645         /* Select the first entry VMID */
646         idle = list_first_entry(&id_mgr->ids_lru, struct amdgpu_vm_id, list);
647         list_del_init(&idle->list);
648         vm->reserved_vmid[vmhub] = idle;
649         mutex_unlock(&id_mgr->lock);
650
651         return 0;
652 unlock:
653         mutex_unlock(&id_mgr->lock);
654         return r;
655 }
656
657 /**
658  * amdgpu_vm_check_compute_bug - check whether asic has compute vm bug
659  *
660  * @adev: amdgpu_device pointer
661  */
662 void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev)
663 {
664         const struct amdgpu_ip_block *ip_block;
665         bool has_compute_vm_bug;
666         struct amdgpu_ring *ring;
667         int i;
668
669         has_compute_vm_bug = false;
670
671         ip_block = amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
672         if (ip_block) {
673                 /* Compute has a VM bug for GFX version < 7.
674                  * Compute has a VM bug for GFX 8 MEC firmware version < 673. */
675                 if (ip_block->version->major <= 7)
676                         has_compute_vm_bug = true;
677                 else if (ip_block->version->major == 8)
678                         if (adev->gfx.mec_fw_version < 673)
679                                 has_compute_vm_bug = true;
680         }
681
682         for (i = 0; i < adev->num_rings; i++) {
683                 ring = adev->rings[i];
684                 if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
685                         /* only compute rings */
686                         ring->has_compute_vm_bug = has_compute_vm_bug;
687                 else
688                         ring->has_compute_vm_bug = false;
689         }
690 }
691
692 bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
693                                   struct amdgpu_job *job)
694 {
695         struct amdgpu_device *adev = ring->adev;
696         unsigned vmhub = ring->funcs->vmhub;
697         struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub];
698         struct amdgpu_vm_id *id;
699         bool gds_switch_needed;
700         bool vm_flush_needed = job->vm_needs_flush || ring->has_compute_vm_bug;
701
702         if (job->vm_id == 0)
703                 return false;
704         id = &id_mgr->ids[job->vm_id];
705         gds_switch_needed = ring->funcs->emit_gds_switch && (
706                 id->gds_base != job->gds_base ||
707                 id->gds_size != job->gds_size ||
708                 id->gws_base != job->gws_base ||
709                 id->gws_size != job->gws_size ||
710                 id->oa_base != job->oa_base ||
711                 id->oa_size != job->oa_size);
712
713         if (amdgpu_vm_had_gpu_reset(adev, id))
714                 return true;
715
716         return vm_flush_needed || gds_switch_needed;
717 }
718
719 static bool amdgpu_vm_is_large_bar(struct amdgpu_device *adev)
720 {
721         return (adev->mc.real_vram_size == adev->mc.visible_vram_size);
722 }
723
724 /**
725  * amdgpu_vm_flush - hardware flush the vm
726  *
727  * @ring: ring to use for flush
728  * @job: related job
729  * @need_pipe_sync: is a pipeline sync needed
730  *
731  * Emit a VM flush when it is necessary.
732  */
733 int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync)
734 {
735         struct amdgpu_device *adev = ring->adev;
736         unsigned vmhub = ring->funcs->vmhub;
737         struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub];
738         struct amdgpu_vm_id *id = &id_mgr->ids[job->vm_id];
739         bool gds_switch_needed = ring->funcs->emit_gds_switch && (
740                 id->gds_base != job->gds_base ||
741                 id->gds_size != job->gds_size ||
742                 id->gws_base != job->gws_base ||
743                 id->gws_size != job->gws_size ||
744                 id->oa_base != job->oa_base ||
745                 id->oa_size != job->oa_size);
746         bool vm_flush_needed = job->vm_needs_flush;
747         unsigned patch_offset = 0;
748         int r;
749
750         if (amdgpu_vm_had_gpu_reset(adev, id)) {
751                 gds_switch_needed = true;
752                 vm_flush_needed = true;
753         }
754
755         if (!vm_flush_needed && !gds_switch_needed && !need_pipe_sync)
756                 return 0;
757
758         if (ring->funcs->init_cond_exec)
759                 patch_offset = amdgpu_ring_init_cond_exec(ring);
760
761         if (need_pipe_sync)
762                 amdgpu_ring_emit_pipeline_sync(ring);
763
764         if (ring->funcs->emit_vm_flush && vm_flush_needed) {
765                 struct dma_fence *fence;
766
767                 trace_amdgpu_vm_flush(ring, job->vm_id, job->vm_pd_addr);
768                 amdgpu_ring_emit_vm_flush(ring, job->vm_id, job->vm_pd_addr);
769
770                 r = amdgpu_fence_emit(ring, &fence);
771                 if (r)
772                         return r;
773
774                 mutex_lock(&id_mgr->lock);
775                 dma_fence_put(id->last_flush);
776                 id->last_flush = fence;
777                 id->current_gpu_reset_count = atomic_read(&adev->gpu_reset_counter);
778                 mutex_unlock(&id_mgr->lock);
779         }
780
781         if (ring->funcs->emit_gds_switch && gds_switch_needed) {
782                 id->gds_base = job->gds_base;
783                 id->gds_size = job->gds_size;
784                 id->gws_base = job->gws_base;
785                 id->gws_size = job->gws_size;
786                 id->oa_base = job->oa_base;
787                 id->oa_size = job->oa_size;
788                 amdgpu_ring_emit_gds_switch(ring, job->vm_id, job->gds_base,
789                                             job->gds_size, job->gws_base,
790                                             job->gws_size, job->oa_base,
791                                             job->oa_size);
792         }
793
794         if (ring->funcs->patch_cond_exec)
795                 amdgpu_ring_patch_cond_exec(ring, patch_offset);
796
797         /* the double SWITCH_BUFFER here *cannot* be skipped by COND_EXEC */
798         if (ring->funcs->emit_switch_buffer) {
799                 amdgpu_ring_emit_switch_buffer(ring);
800                 amdgpu_ring_emit_switch_buffer(ring);
801         }
802         return 0;
803 }
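
/*
 * Note (a summary of the flow, not from the original source): during job
 * submission amdgpu_vm_grab_id() picks a VMID and decides whether
 * job->vm_needs_flush must be set, amdgpu_vm_need_pipeline_sync() checks if
 * the ring additionally needs a pipeline sync, and amdgpu_vm_flush() finally
 * emits the flush, the GDS switch and, where the ring supports it, the double
 * SWITCH_BUFFER before the actual IB is executed.
 */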
804
805 /**
806  * amdgpu_vm_reset_id - reset VMID to zero
807  *
808  * @adev: amdgpu device structure
809  * @vmid: vmid number to reset
810  *
811  * Reset saved GDS, GWS and OA to force switch on next flush.
812  */
813 void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vmhub,
814                         unsigned vmid)
815 {
816         struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub];
817         struct amdgpu_vm_id *id = &id_mgr->ids[vmid];
818
819         atomic64_set(&id->owner, 0);
820         id->gds_base = 0;
821         id->gds_size = 0;
822         id->gws_base = 0;
823         id->gws_size = 0;
824         id->oa_base = 0;
825         id->oa_size = 0;
826 }
827
828 /**
829  * amdgpu_vm_reset_all_ids - reset all VMIDs to zero
830  *
831  * @adev: amdgpu device structure
832  *
833  * Reset all VMIDs to force a flush on next use
834  */
835 void amdgpu_vm_reset_all_ids(struct amdgpu_device *adev)
836 {
837         unsigned i, j;
838
839         for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
840                 struct amdgpu_vm_id_manager *id_mgr =
841                         &adev->vm_manager.id_mgr[i];
842
843                 for (j = 1; j < id_mgr->num_ids; ++j)
844                         amdgpu_vm_reset_id(adev, i, j);
845         }
846 }
847
848 /**
849  * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
850  *
851  * @vm: requested vm
852  * @bo: requested buffer object
853  *
854  * Find @bo inside the requested vm.
855  * Search inside the @bo's vm list for the requested vm
856  * Returns the found bo_va or NULL if none is found
857  *
858  * Object has to be reserved!
859  */
860 struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
861                                        struct amdgpu_bo *bo)
862 {
863         struct amdgpu_bo_va *bo_va;
864
865         list_for_each_entry(bo_va, &bo->va, base.bo_list) {
866                 if (bo_va->base.vm == vm) {
867                         return bo_va;
868                 }
869         }
870         return NULL;
871 }
872
873 /**
874  * amdgpu_vm_do_set_ptes - helper to call the right asic function
875  *
876  * @params: see amdgpu_pte_update_params definition
877  * @pe: addr of the page entry
878  * @addr: dst addr to write into pe
879  * @count: number of page entries to update
880  * @incr: increase next addr by incr bytes
881  * @flags: hw access flags
882  *
883  * Traces the parameters and calls the right asic functions
884  * to setup the page table using the DMA.
885  */
886 static void amdgpu_vm_do_set_ptes(struct amdgpu_pte_update_params *params,
887                                   uint64_t pe, uint64_t addr,
888                                   unsigned count, uint32_t incr,
889                                   uint64_t flags)
890 {
891         trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags);
892
893         if (count < 3) {
894                 amdgpu_vm_write_pte(params->adev, params->ib, pe,
895                                     addr | flags, count, incr);
896
897         } else {
898                 amdgpu_vm_set_pte_pde(params->adev, params->ib, pe, addr,
899                                       count, incr, flags);
900         }
901 }
902
903 /**
904  * amdgpu_vm_do_copy_ptes - copy the PTEs from the GART
905  *
906  * @params: see amdgpu_pte_update_params definition
907  * @pe: addr of the page entry
908  * @addr: dst addr to write into pe
909  * @count: number of page entries to update
910  * @incr: increase next addr by incr bytes
911  * @flags: hw access flags
912  *
913  * Traces the parameters and calls the DMA function to copy the PTEs.
914  */
915 static void amdgpu_vm_do_copy_ptes(struct amdgpu_pte_update_params *params,
916                                    uint64_t pe, uint64_t addr,
917                                    unsigned count, uint32_t incr,
918                                    uint64_t flags)
919 {
920         uint64_t src = (params->src + (addr >> 12) * 8);
921
922
923         trace_amdgpu_vm_copy_ptes(pe, src, count);
924
925         amdgpu_vm_copy_pte(params->adev, params->ib, pe, src, count);
926 }
927
928 /**
929  * amdgpu_vm_map_gart - Resolve gart mapping of addr
930  *
931  * @pages_addr: optional DMA address to use for lookup
932  * @addr: the unmapped addr
933  *
934  * Look up the physical address of the page that the pte resolves
935  * to and return the pointer for the page table entry.
936  */
937 static uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
938 {
939         uint64_t result;
940
941         /* page table offset */
942         result = pages_addr[addr >> PAGE_SHIFT];
943
944         /* in case cpu page size != gpu page size */
945         result |= addr & (~PAGE_MASK);
946
947         result &= 0xFFFFFFFFFFFFF000ULL;
948
949         return result;
950 }
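
/*
 * Worked example (illustrative numbers, assuming 64KiB CPU pages and 4KiB GPU
 * pages): for addr == 0x23000 the lookup uses pages_addr[0x2], say 0xabc0000;
 * OR-ing in the low bits gives 0xabc3000 and the final mask keeps the result
 * 4KiB aligned, so the PTE points at the right 4KiB piece of the 64KiB CPU
 * page.
 */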
951
952 /**
953  * amdgpu_vm_cpu_set_ptes - helper to update page tables via CPU
954  *
955  * @params: see amdgpu_pte_update_params definition
956  * @pe: kmap addr of the page entry
957  * @addr: dst addr to write into pe
958  * @count: number of page entries to update
959  * @incr: increase next addr by incr bytes
960  * @flags: hw access flags
961  *
962  * Write count number of PT/PD entries directly.
963  */
964 static void amdgpu_vm_cpu_set_ptes(struct amdgpu_pte_update_params *params,
965                                    uint64_t pe, uint64_t addr,
966                                    unsigned count, uint32_t incr,
967                                    uint64_t flags)
968 {
969         unsigned int i;
970         uint64_t value;
971
972         trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags);
973
974         for (i = 0; i < count; i++) {
975                 value = params->pages_addr ?
976                         amdgpu_vm_map_gart(params->pages_addr, addr) :
977                         addr;
978                 amdgpu_gart_set_pte_pde(params->adev, (void *)(uintptr_t)pe,
979                                         i, value, flags);
980                 addr += incr;
981         }
982 }
983
984 static int amdgpu_vm_wait_pd(struct amdgpu_device *adev, struct amdgpu_vm *vm,
985                              void *owner)
986 {
987         struct amdgpu_sync sync;
988         int r;
989
990         amdgpu_sync_create(&sync);
991         amdgpu_sync_resv(adev, &sync, vm->root.bo->tbo.resv, owner);
992         r = amdgpu_sync_wait(&sync, true);
993         amdgpu_sync_free(&sync);
994
995         return r;
996 }
997
998 /*
999  * amdgpu_vm_update_level - update a single level in the hierarchy
1000  *
1001  * @adev: amdgpu_device pointer
1002  * @vm: requested vm
1003  * @parent: parent directory
1004  *
1005  * Makes sure all entries in @parent are up to date.
1006  * Returns 0 for success, error for failure.
1007  */
1008 static int amdgpu_vm_update_level(struct amdgpu_device *adev,
1009                                   struct amdgpu_vm *vm,
1010                                   struct amdgpu_vm_pt *parent,
1011                                   unsigned level)
1012 {
1013         struct amdgpu_bo *shadow;
1014         struct amdgpu_ring *ring = NULL;
1015         uint64_t pd_addr, shadow_addr = 0;
1016         uint32_t incr = amdgpu_vm_bo_size(adev, level + 1);
1017         uint64_t last_pde = ~0, last_pt = ~0, last_shadow = ~0;
1018         unsigned count = 0, pt_idx, ndw = 0;
1019         struct amdgpu_job *job;
1020         struct amdgpu_pte_update_params params;
1021         struct dma_fence *fence = NULL;
1022
1023         int r;
1024
1025         if (!parent->entries)
1026                 return 0;
1027
1028         memset(&params, 0, sizeof(params));
1029         params.adev = adev;
1030         shadow = parent->bo->shadow;
1031
1032         if (vm->use_cpu_for_update) {
1033                 pd_addr = (unsigned long)amdgpu_bo_kptr(parent->bo);
1034                 r = amdgpu_vm_wait_pd(adev, vm, AMDGPU_FENCE_OWNER_VM);
1035                 if (unlikely(r))
1036                         return r;
1037
1038                 params.func = amdgpu_vm_cpu_set_ptes;
1039         } else {
1040                 ring = container_of(vm->entity.sched, struct amdgpu_ring,
1041                                     sched);
1042
1043                 /* padding, etc. */
1044                 ndw = 64;
1045
1046                 /* assume the worst case */
1047                 ndw += parent->last_entry_used * 6;
1048
1049                 pd_addr = amdgpu_bo_gpu_offset(parent->bo);
1050
1051                 if (shadow) {
1052                         shadow_addr = amdgpu_bo_gpu_offset(shadow);
1053                         ndw *= 2;
1054                 } else {
1055                         shadow_addr = 0;
1056                 }
1057
1058                 r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
1059                 if (r)
1060                         return r;
1061
1062                 params.ib = &job->ibs[0];
1063                 params.func = amdgpu_vm_do_set_ptes;
1064         }
1065
1066
1067         /* walk over the address space and update the directory */
1068         for (pt_idx = 0; pt_idx <= parent->last_entry_used; ++pt_idx) {
1069                 struct amdgpu_bo *bo = parent->entries[pt_idx].bo;
1070                 uint64_t pde, pt;
1071
1072                 if (bo == NULL)
1073                         continue;
1074
1075                 pt = amdgpu_bo_gpu_offset(bo);
1076                 pt = amdgpu_gart_get_vm_pde(adev, pt);
1077                 /* Don't update huge pages here */
1078                 if ((parent->entries[pt_idx].addr & AMDGPU_PDE_PTE) ||
1079                     parent->entries[pt_idx].addr == (pt | AMDGPU_PTE_VALID))
1080                         continue;
1081
1082                 parent->entries[pt_idx].addr = pt | AMDGPU_PTE_VALID;
1083
1084                 pde = pd_addr + pt_idx * 8;
1085                 if (((last_pde + 8 * count) != pde) ||
1086                     ((last_pt + incr * count) != pt) ||
1087                     (count == AMDGPU_VM_MAX_UPDATE_SIZE)) {
1088
1089                         if (count) {
1090                                 if (shadow)
1091                                         params.func(&params,
1092                                                     last_shadow,
1093                                                     last_pt, count,
1094                                                     incr,
1095                                                     AMDGPU_PTE_VALID);
1096
1097                                 params.func(&params, last_pde,
1098                                             last_pt, count, incr,
1099                                             AMDGPU_PTE_VALID);
1100                         }
1101
1102                         count = 1;
1103                         last_pde = pde;
1104                         last_shadow = shadow_addr + pt_idx * 8;
1105                         last_pt = pt;
1106                 } else {
1107                         ++count;
1108                 }
1109         }
1110
1111         if (count) {
1112                 if (shadow)
1113                         params.func(&params, last_shadow, last_pt,
1114                                     count, incr, AMDGPU_PTE_VALID);
1115
1116                 params.func(&params, last_pde, last_pt,
1117                             count, incr, AMDGPU_PTE_VALID);
1118         }
1119
1120         if (!vm->use_cpu_for_update) {
1121                 if (params.ib->length_dw == 0) {
1122                         amdgpu_job_free(job);
1123                 } else {
1124                         amdgpu_ring_pad_ib(ring, params.ib);
1125                         amdgpu_sync_resv(adev, &job->sync, parent->bo->tbo.resv,
1126                                          AMDGPU_FENCE_OWNER_VM);
1127                         if (shadow)
1128                                 amdgpu_sync_resv(adev, &job->sync,
1129                                                  shadow->tbo.resv,
1130                                                  AMDGPU_FENCE_OWNER_VM);
1131
1132                         WARN_ON(params.ib->length_dw > ndw);
1133                         r = amdgpu_job_submit(job, ring, &vm->entity,
1134                                         AMDGPU_FENCE_OWNER_VM, &fence);
1135                         if (r)
1136                                 goto error_free;
1137
1138                         amdgpu_bo_fence(parent->bo, fence, true);
1139                         dma_fence_put(vm->last_dir_update);
1140                         vm->last_dir_update = dma_fence_get(fence);
1141                         dma_fence_put(fence);
1142                 }
1143         }
1144         /*
1145          * Recurse into the subdirectories. This recursion is harmless because
1146          * we only have a maximum of 5 layers.
1147          */
1148         for (pt_idx = 0; pt_idx <= parent->last_entry_used; ++pt_idx) {
1149                 struct amdgpu_vm_pt *entry = &parent->entries[pt_idx];
1150
1151                 if (!entry->bo)
1152                         continue;
1153
1154                 r = amdgpu_vm_update_level(adev, vm, entry, level + 1);
1155                 if (r)
1156                         return r;
1157         }
1158
1159         return 0;
1160
1161 error_free:
1162         amdgpu_job_free(job);
1163         return r;
1164 }
1165
1166 /*
1167  * amdgpu_vm_invalidate_level - mark all PD levels as invalid
1168  *
1169  * @parent: parent PD
1170  *
1171  * Mark all PD levels as invalid after an error.
1172  */
1173 static void amdgpu_vm_invalidate_level(struct amdgpu_vm_pt *parent)
1174 {
1175         unsigned pt_idx;
1176
1177         /*
1178          * Recurse into the subdirectories. This recursion is harmless because
1179          * we only have a maximum of 5 layers.
1180          */
1181         for (pt_idx = 0; pt_idx <= parent->last_entry_used; ++pt_idx) {
1182                 struct amdgpu_vm_pt *entry = &parent->entries[pt_idx];
1183
1184                 if (!entry->bo)
1185                         continue;
1186
1187                 entry->addr = ~0ULL;
1188                 amdgpu_vm_invalidate_level(entry);
1189         }
1190 }
1191
1192 /*
1193  * amdgpu_vm_update_directories - make sure that all directories are valid
1194  *
1195  * @adev: amdgpu_device pointer
1196  * @vm: requested vm
1197  *
1198  * Makes sure all directories are up to date.
1199  * Returns 0 for success, error for failure.
1200  */
1201 int amdgpu_vm_update_directories(struct amdgpu_device *adev,
1202                                  struct amdgpu_vm *vm)
1203 {
1204         int r;
1205
1206         r = amdgpu_vm_update_level(adev, vm, &vm->root, 0);
1207         if (r)
1208                 amdgpu_vm_invalidate_level(&vm->root);
1209
1210         if (vm->use_cpu_for_update) {
1211                 /* Flush HDP */
1212                 mb();
1213                 amdgpu_gart_flush_gpu_tlb(adev, 0);
1214         }
1215
1216         return r;
1217 }
1218
1219 /**
1220  * amdgpu_vm_get_entry - find the entry for an address
1221  *
1222  * @p: see amdgpu_pte_update_params definition
1223  * @addr: virtual address in question
1224  * @entry: resulting entry or NULL
1225  * @parent: parent entry
1226  *
1227  * Find the vm_pt entry and its parent for the given address.
1228  */
1229 void amdgpu_vm_get_entry(struct amdgpu_pte_update_params *p, uint64_t addr,
1230                          struct amdgpu_vm_pt **entry,
1231                          struct amdgpu_vm_pt **parent)
1232 {
1233         unsigned idx, level = p->adev->vm_manager.num_level;
1234
1235         *parent = NULL;
1236         *entry = &p->vm->root;
1237         while ((*entry)->entries) {
1238                 idx = addr >> (p->adev->vm_manager.block_size * level--);
1239                 idx %= amdgpu_bo_size((*entry)->bo) / 8;
1240                 *parent = *entry;
1241                 *entry = &(*entry)->entries[idx];
1242         }
1243
1244         if (level)
1245                 *entry = NULL;
1246 }
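
/*
 * Worked example (illustrative, assuming block_size == 9 and num_level == 3):
 * for a GPU page address the walk above uses addr >> 27 as index into the
 * root PD, then (addr >> 18) % 512 and (addr >> 9) % 512 for the lower
 * directory levels, and stops at the leaf page table (whose entries array is
 * NULL); the low 9 bits of the address select the PTE inside that table.
 */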
1247
1248 /**
1249  * amdgpu_vm_handle_huge_pages - handle updating the PD with huge pages
1250  *
1251  * @p: see amdgpu_pte_update_params definition
1252  * @entry: vm_pt entry to check
1253  * @parent: parent entry
1254  * @nptes: number of PTEs updated with this operation
1255  * @dst: destination address where the PTEs should point to
1256  * @flags: access flags for the PTEs
1257  *
1258  * Check if we can update the PD with a huge page.
1259  */
1260 static void amdgpu_vm_handle_huge_pages(struct amdgpu_pte_update_params *p,
1261                                         struct amdgpu_vm_pt *entry,
1262                                         struct amdgpu_vm_pt *parent,
1263                                         unsigned nptes, uint64_t dst,
1264                                         uint64_t flags)
1265 {
1266         bool use_cpu_update = (p->func == amdgpu_vm_cpu_set_ptes);
1267         uint64_t pd_addr, pde;
1268
1269         /* In the case of a mixed PT the PDE must point to it */
1270         if (p->adev->asic_type < CHIP_VEGA10 ||
1271             nptes != AMDGPU_VM_PTE_COUNT(p->adev) ||
1272             p->src ||
1273             !(flags & AMDGPU_PTE_VALID)) {
1274
1275                 dst = amdgpu_bo_gpu_offset(entry->bo);
1276                 dst = amdgpu_gart_get_vm_pde(p->adev, dst);
1277                 flags = AMDGPU_PTE_VALID;
1278         } else {
1279                 /* Set the huge page flag to stop scanning at this PDE */
1280                 flags |= AMDGPU_PDE_PTE;
1281         }
1282
1283         if (entry->addr == (dst | flags))
1284                 return;
1285
1286         entry->addr = (dst | flags);
1287
1288         if (use_cpu_update) {
1289                 /* In case a huge page is replaced with a system
1290                  * memory mapping, p->pages_addr != NULL and
1291                  * amdgpu_vm_cpu_set_ptes would try to translate dst
1292                  * through amdgpu_vm_map_gart. But dst is already a
1293                  * GPU address (of the page table). Disable
1294                  * amdgpu_vm_map_gart temporarily.
1295                  */
1296                 dma_addr_t *tmp;
1297
1298                 tmp = p->pages_addr;
1299                 p->pages_addr = NULL;
1300
1301                 pd_addr = (unsigned long)amdgpu_bo_kptr(parent->bo);
1302                 pde = pd_addr + (entry - parent->entries) * 8;
1303                 amdgpu_vm_cpu_set_ptes(p, pde, dst, 1, 0, flags);
1304
1305                 p->pages_addr = tmp;
1306         } else {
1307                 if (parent->bo->shadow) {
1308                         pd_addr = amdgpu_bo_gpu_offset(parent->bo->shadow);
1309                         pde = pd_addr + (entry - parent->entries) * 8;
1310                         amdgpu_vm_do_set_ptes(p, pde, dst, 1, 0, flags);
1311                 }
1312                 pd_addr = amdgpu_bo_gpu_offset(parent->bo);
1313                 pde = pd_addr + (entry - parent->entries) * 8;
1314                 amdgpu_vm_do_set_ptes(p, pde, dst, 1, 0, flags);
1315         }
1316 }
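
/*
 * Illustrative note: with 512 PTEs per page table and 4KiB GPU pages a fully
 * covered page table spans 512 * 4KiB = 2MiB, so when a contiguous, valid
 * 2MiB mapping lines up with a PDE the PDE itself can carry the mapping
 * (AMDGPU_PDE_PTE) and the page table below it is skipped.
 */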
1317
1318 /**
1319  * amdgpu_vm_update_ptes - make sure that page tables are valid
1320  *
1321  * @params: see amdgpu_pte_update_params definition
1322  * @vm: requested vm
1323  * @start: start of GPU address range
1324  * @end: end of GPU address range
1325  * @dst: destination address to map to, the next dst inside the function
1326  * @flags: mapping flags
1327  *
1328  * Update the page tables in the range @start - @end.
1329  * Returns 0 for success, -EINVAL for failure.
1330  */
1331 static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
1332                                   uint64_t start, uint64_t end,
1333                                   uint64_t dst, uint64_t flags)
1334 {
1335         struct amdgpu_device *adev = params->adev;
1336         const uint64_t mask = AMDGPU_VM_PTE_COUNT(adev) - 1;
1337
1338         uint64_t addr, pe_start;
1339         struct amdgpu_bo *pt;
1340         unsigned nptes;
1341         bool use_cpu_update = (params->func == amdgpu_vm_cpu_set_ptes);
1342
1343         /* walk over the address space and update the page tables */
1344         for (addr = start; addr < end; addr += nptes,
1345              dst += nptes * AMDGPU_GPU_PAGE_SIZE) {
1346                 struct amdgpu_vm_pt *entry, *parent;
1347
1348                 amdgpu_vm_get_entry(params, addr, &entry, &parent);
1349                 if (!entry)
1350                         return -ENOENT;
1351
1352                 if ((addr & ~mask) == (end & ~mask))
1353                         nptes = end - addr;
1354                 else
1355                         nptes = AMDGPU_VM_PTE_COUNT(adev) - (addr & mask);
1356
1357                 amdgpu_vm_handle_huge_pages(params, entry, parent,
1358                                             nptes, dst, flags);
1359                 /* We don't need to update PTEs for huge pages */
1360                 if (entry->addr & AMDGPU_PDE_PTE)
1361                         continue;
1362
1363                 pt = entry->bo;
1364                 if (use_cpu_update) {
1365                         pe_start = (unsigned long)amdgpu_bo_kptr(pt);
1366                 } else {
1367                         if (pt->shadow) {
1368                                 pe_start = amdgpu_bo_gpu_offset(pt->shadow);
1369                                 pe_start += (addr & mask) * 8;
1370                                 params->func(params, pe_start, dst, nptes,
1371                                              AMDGPU_GPU_PAGE_SIZE, flags);
1372                         }
1373                         pe_start = amdgpu_bo_gpu_offset(pt);
1374                 }
1375
1376                 pe_start += (addr & mask) * 8;
1377                 params->func(params, pe_start, dst, nptes,
1378                              AMDGPU_GPU_PAGE_SIZE, flags);
1379         }
1380
1381         return 0;
1382 }
1383
1384 /*
1385  * amdgpu_vm_frag_ptes - add fragment information to PTEs
1386  *
1387  * @params: see amdgpu_pte_update_params definition
1388  * @vm: requested vm
1389  * @start: first PTE to handle
1390  * @end: last PTE to handle
1391  * @dst: addr those PTEs should point to
1392  * @flags: hw mapping flags
1393  * Returns 0 for success, -EINVAL for failure.
1394  */
1395 static int amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params  *params,
1396                                 uint64_t start, uint64_t end,
1397                                 uint64_t dst, uint64_t flags)
1398 {
1399         int r;
1400
1401         /**
1402          * The MC L1 TLB supports variable sized pages, based on a fragment
1403          * field in the PTE. When this field is set to a non-zero value, page
1404          * granularity is increased from 4KB to (1 << (12 + frag)). The PTE
1405          * flags are considered valid for all PTEs within the fragment range
1406          * and corresponding mappings are assumed to be physically contiguous.
1407          *
1408          * The L1 TLB can store a single PTE for the whole fragment,
1409          * significantly increasing the space available for translation
1410          * caching. This leads to large improvements in throughput when the
1411          * TLB is under pressure.
1412          *
1413          * The L2 TLB distributes small and large fragments into two
1414          * asymmetric partitions. The large fragment cache is significantly
1415          * larger. Thus, we try to use large fragments wherever possible.
1416          * Userspace can support this by aligning virtual base address and
1417          * allocation size to the fragment size.
1418          */
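        /*
         * Worked example (illustrative, assuming fragment_size == 4): a
         * fragment then covers 1 << 4 = 16 pages, i.e. 1 << (12 + 4) = 64KiB.
         * Updating pages [5, 70) splits into a 4KiB head [5, 16), a fragment
         * aligned middle [16, 64) written with the fragment flags and a 4KiB
         * tail [64, 70).
         */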
1419         unsigned pages_per_frag = params->adev->vm_manager.fragment_size;
1420         uint64_t frag_flags = AMDGPU_PTE_FRAG(pages_per_frag);
1421         uint64_t frag_align = 1 << pages_per_frag;
1422
1423         uint64_t frag_start = ALIGN(start, frag_align);
1424         uint64_t frag_end = end & ~(frag_align - 1);
1425
1426         /* system pages are not contiguous */
1427         if (params->src || !(flags & AMDGPU_PTE_VALID) ||
1428             (frag_start >= frag_end))
1429                 return amdgpu_vm_update_ptes(params, start, end, dst, flags);
1430
1431         /* handle the 4K area at the beginning */
1432         if (start != frag_start) {
1433                 r = amdgpu_vm_update_ptes(params, start, frag_start,
1434                                           dst, flags);
1435                 if (r)
1436                         return r;
1437                 dst += (frag_start - start) * AMDGPU_GPU_PAGE_SIZE;
1438         }
1439
1440         /* handle the area in the middle */
1441         r = amdgpu_vm_update_ptes(params, frag_start, frag_end, dst,
1442                                   flags | frag_flags);
1443         if (r)
1444                 return r;
1445
1446         /* handle the 4K area at the end */
1447         if (frag_end != end) {
1448                 dst += (frag_end - frag_start) * AMDGPU_GPU_PAGE_SIZE;
1449                 r = amdgpu_vm_update_ptes(params, frag_end, end, dst, flags);
1450         }
1451         return r;
1452 }
1453
1454 /**
1455  * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
1456  *
1457  * @adev: amdgpu_device pointer
1458  * @exclusive: fence we need to sync to
1459  * @src: address where to copy page table entries from
1460  * @pages_addr: DMA addresses to use for mapping
1461  * @vm: requested vm
1462  * @start: start of mapped range
1463  * @last: last mapped entry
1464  * @flags: flags for the entries
1465  * @addr: addr to set the area to
1466  * @fence: optional resulting fence
1467  *
1468  * Fill in the page table entries between @start and @last.
1469  * Returns 0 for success, -EINVAL for failure.
1470  */
1471 static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
1472                                        struct dma_fence *exclusive,
1473                                        uint64_t src,
1474                                        dma_addr_t *pages_addr,
1475                                        struct amdgpu_vm *vm,
1476                                        uint64_t start, uint64_t last,
1477                                        uint64_t flags, uint64_t addr,
1478                                        struct dma_fence **fence)
1479 {
1480         struct amdgpu_ring *ring;
1481         void *owner = AMDGPU_FENCE_OWNER_VM;
1482         unsigned nptes, ncmds, ndw;
1483         struct amdgpu_job *job;
1484         struct amdgpu_pte_update_params params;
1485         struct dma_fence *f = NULL;
1486         int r;
1487
1488         memset(&params, 0, sizeof(params));
1489         params.adev = adev;
1490         params.vm = vm;
1491         params.src = src;
1492
1493         /* sync to everything on unmapping */
1494         if (!(flags & AMDGPU_PTE_VALID))
1495                 owner = AMDGPU_FENCE_OWNER_UNDEFINED;
1496
1497         if (vm->use_cpu_for_update) {
1498                 /* params.src is used as a flag to indicate system memory */
1499                 if (pages_addr)
1500                         params.src = ~0;
1501
1502                 /* Wait for PT BOs to be free. PTs share the same resv. object
1503                  * as the root PD BO
1504                  */
1505                 r = amdgpu_vm_wait_pd(adev, vm, owner);
1506                 if (unlikely(r))
1507                         return r;
1508
1509                 params.func = amdgpu_vm_cpu_set_ptes;
1510                 params.pages_addr = pages_addr;
1511                 return amdgpu_vm_frag_ptes(&params, start, last + 1,
1512                                            addr, flags);
1513         }
1514
1515         ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
1516
1517         nptes = last - start + 1;
1518
1519         /*
1520          * reserve space for one command every (1 << BLOCK_SIZE)
1521          * entries or 2k entries (whatever is smaller)
1522          */
1523         ncmds = (nptes >> min(adev->vm_manager.block_size, 11u)) + 1;
1524
1525         /* padding, etc. */
1526         ndw = 64;
1527
1528         /* one PDE write for each huge page */
1529         ndw += ((nptes >> adev->vm_manager.block_size) + 1) * 6;
1530
1531         if (src) {
1532                 /* only copy commands needed */
1533                 ndw += ncmds * 7;
1534
1535                 params.func = amdgpu_vm_do_copy_ptes;
1536
1537         } else if (pages_addr) {
1538                 /* copy commands needed */
1539                 ndw += ncmds * 7;
1540
1541                 /* and also PTEs */
1542                 ndw += nptes * 2;
1543
1544                 params.func = amdgpu_vm_do_copy_ptes;
1545
1546         } else {
1547                 /* set page commands needed */
1548                 ndw += ncmds * 10;
1549
1550                 /* two extra commands for begin/end of fragment */
1551                 ndw += 2 * 10;
1552
1553                 params.func = amdgpu_vm_do_set_ptes;
1554         }
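        /* Rough sizing example, assuming block_size = 9 and nptes = 2048 (an
         * 8 MiB update) on the set-pages path: ncmds = (2048 >> 9) + 1 = 5 and
         * ndw = 64 + 5 * 6 + 5 * 10 + 2 * 10 = 164 dwords, i.e. 656 bytes of
         * IB space reserved below.
         */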
1555
1556         r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
1557         if (r)
1558                 return r;
1559
1560         params.ib = &job->ibs[0];
1561
1562         if (!src && pages_addr) {
1563                 uint64_t *pte;
1564                 unsigned i;
1565
1566                 /* Put the PTEs at the end of the IB. */
1567                 i = ndw - nptes * 2;
1568                 pte = (uint64_t *)&(job->ibs->ptr[i]);
1569                 params.src = job->ibs->gpu_addr + i * 4;
1570
1571                 for (i = 0; i < nptes; ++i) {
1572                         pte[i] = amdgpu_vm_map_gart(pages_addr, addr + i *
1573                                                     AMDGPU_GPU_PAGE_SIZE);
1574                         pte[i] |= flags;
1575                 }
1576                 addr = 0;
1577         }
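        /* When copying from pages_addr, the IB doubles as a temporary GART
         * table: the block above builds the nptes PTEs in the last nptes * 2
         * dwords of the IB and points params.src at their GPU address, so the
         * copy commands emitted later read them straight out of the IB; addr
         * is cleared because it now only indexes into that temporary table.
         */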
1578
1579         r = amdgpu_sync_fence(adev, &job->sync, exclusive);
1580         if (r)
1581                 goto error_free;
1582
1583         r = amdgpu_sync_resv(adev, &job->sync, vm->root.bo->tbo.resv,
1584                              owner);
1585         if (r)
1586                 goto error_free;
1587
1588         r = reservation_object_reserve_shared(vm->root.bo->tbo.resv);
1589         if (r)
1590                 goto error_free;
1591
1592         r = amdgpu_vm_frag_ptes(&params, start, last + 1, addr, flags);
1593         if (r)
1594                 goto error_free;
1595
1596         amdgpu_ring_pad_ib(ring, params.ib);
1597         WARN_ON(params.ib->length_dw > ndw);
1598         r = amdgpu_job_submit(job, ring, &vm->entity,
1599                               AMDGPU_FENCE_OWNER_VM, &f);
1600         if (r)
1601                 goto error_free;
1602
1603         amdgpu_bo_fence(vm->root.bo, f, true);
1604         dma_fence_put(*fence);
1605         *fence = f;
1606         return 0;
1607
1608 error_free:
1609         amdgpu_job_free(job);
1610         amdgpu_vm_invalidate_level(&vm->root);
1611         return r;
1612 }
1613
1614 /**
1615  * amdgpu_vm_bo_split_mapping - split a mapping into smaller chunks
1616  *
1617  * @adev: amdgpu_device pointer
1618  * @exclusive: fence we need to sync to
1619  * @gtt_flags: flags as they are used for GTT
1620  * @pages_addr: DMA addresses to use for mapping
1621  * @vm: requested vm
1622  * @mapping: mapped range and flags to use for the update
1623  * @flags: HW flags for the mapping
1624  * @nodes: array of drm_mm_nodes with the MC addresses
1625  * @fence: optional resulting fence
1626  *
1627  * Split the mapping into smaller chunks so that each update fits
1628  * into an SDMA IB.
1629  * Returns 0 for success, -EINVAL for failure.
1630  */
1631 static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
1632                                       struct dma_fence *exclusive,
1633                                       uint64_t gtt_flags,
1634                                       dma_addr_t *pages_addr,
1635                                       struct amdgpu_vm *vm,
1636                                       struct amdgpu_bo_va_mapping *mapping,
1637                                       uint64_t flags,
1638                                       struct drm_mm_node *nodes,
1639                                       struct dma_fence **fence)
1640 {
1641         uint64_t pfn, src = 0, start = mapping->start;
1642         int r;
1643
1644         /* Normally bo_va->flags should only contain the READABLE and WRITEABLE
1645          * bits, but filter them here anyway, just in case.
1646          */
1647         if (!(mapping->flags & AMDGPU_PTE_READABLE))
1648                 flags &= ~AMDGPU_PTE_READABLE;
1649         if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
1650                 flags &= ~AMDGPU_PTE_WRITEABLE;
1651
1652         flags &= ~AMDGPU_PTE_EXECUTABLE;
1653         flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
1654
1655         flags &= ~AMDGPU_PTE_MTYPE_MASK;
1656         flags |= (mapping->flags & AMDGPU_PTE_MTYPE_MASK);
1657
1658         if ((mapping->flags & AMDGPU_PTE_PRT) &&
1659             (adev->asic_type >= CHIP_VEGA10)) {
1660                 flags |= AMDGPU_PTE_PRT;
1661                 flags &= ~AMDGPU_PTE_VALID;
1662         }
1663
1664         trace_amdgpu_vm_bo_update(mapping);
1665
1666         pfn = mapping->offset >> PAGE_SHIFT;
1667         if (nodes) {
1668                 while (pfn >= nodes->size) {
1669                         pfn -= nodes->size;
1670                         ++nodes;
1671                 }
1672         }
1673
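        /* Chunking sketch (sizes assumed): for a system-memory BO whose PTE
         * flags differ from gtt_flags, max_entries below is clamped to
         * 16 * 1024 PTEs, i.e. 64 MiB worth of 4 KiB pages, so a 256 MiB
         * mapping is written with four amdgpu_vm_bo_update_mapping() calls.
         * When the flags match gtt_flags the entries are copied straight from
         * the GART table instead and no such clamp is applied.
         */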
1674         do {
1675                 uint64_t max_entries;
1676                 uint64_t addr, last;
1677
1678                 if (nodes) {
1679                         addr = nodes->start << PAGE_SHIFT;
1680                         max_entries = (nodes->size - pfn) *
1681                                 (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
1682                 } else {
1683                         addr = 0;
1684                         max_entries = S64_MAX;
1685                 }
1686
1687                 if (pages_addr) {
1688                         if (flags == gtt_flags)
1689                                 src = adev->gart.table_addr +
1690                                         (addr >> AMDGPU_GPU_PAGE_SHIFT) * 8;
1691                         else
1692                                 max_entries = min(max_entries, 16ull * 1024ull);
1693                         addr = 0;
1694                 } else if (flags & AMDGPU_PTE_VALID) {
1695                         addr += adev->vm_manager.vram_base_offset;
1696                 }
1697                 addr += pfn << PAGE_SHIFT;
1698
1699                 last = min((uint64_t)mapping->last, start + max_entries - 1);
1700                 r = amdgpu_vm_bo_update_mapping(adev, exclusive,
1701                                                 src, pages_addr, vm,
1702                                                 start, last, flags, addr,
1703                                                 fence);
1704                 if (r)
1705                         return r;
1706
1707                 pfn += last - start + 1;
1708                 if (nodes && nodes->size == pfn) {
1709                         pfn = 0;
1710                         ++nodes;
1711                 }
1712                 start = last + 1;
1713
1714         } while (unlikely(start != mapping->last + 1));
1715
1716         return 0;
1717 }
1718
1719 /**
1720  * amdgpu_vm_bo_update - update all BO mappings in the vm page table
1721  *
1722  * @adev: amdgpu_device pointer
1723  * @bo_va: requested BO and VM object
1724  * @clear: if true clear the entries
1725  *
1726  * Fill in the page table entries for @bo_va.
1727  * Returns 0 for success, -EINVAL for failure.
1728  */
1729 int amdgpu_vm_bo_update(struct amdgpu_device *adev,
1730                         struct amdgpu_bo_va *bo_va,
1731                         bool clear)
1732 {
1733         struct amdgpu_bo *bo = bo_va->base.bo;
1734         struct amdgpu_vm *vm = bo_va->base.vm;
1735         struct amdgpu_bo_va_mapping *mapping;
1736         dma_addr_t *pages_addr = NULL;
1737         uint64_t gtt_flags, flags;
1738         struct ttm_mem_reg *mem;
1739         struct drm_mm_node *nodes;
1740         struct dma_fence *exclusive;
1741         int r;
1742
1743         if (clear || !bo_va->base.bo) {
1744                 mem = NULL;
1745                 nodes = NULL;
1746                 exclusive = NULL;
1747         } else {
1748                 struct ttm_dma_tt *ttm;
1749
1750                 mem = &bo_va->base.bo->tbo.mem;
1751                 nodes = mem->mm_node;
1752                 if (mem->mem_type == TTM_PL_TT) {
1753                         ttm = container_of(bo_va->base.bo->tbo.ttm,
1754                                            struct ttm_dma_tt, ttm);
1755                         pages_addr = ttm->dma_address;
1756                 }
1757                 exclusive = reservation_object_get_excl(bo->tbo.resv);
1758         }
1759
1760         if (bo) {
1761                 flags = amdgpu_ttm_tt_pte_flags(adev, bo->tbo.ttm, mem);
1762                 gtt_flags = (amdgpu_ttm_is_bound(bo->tbo.ttm) &&
1763                         adev == amdgpu_ttm_adev(bo->tbo.bdev)) ?
1764                         flags : 0;
1765         } else {
1766                 flags = 0x0;
1767                 gtt_flags = ~0x0;
1768         }
1769
1770         spin_lock(&vm->status_lock);
1771         if (!list_empty(&bo_va->base.vm_status))
1772                 list_splice_init(&bo_va->valids, &bo_va->invalids);
1773         spin_unlock(&vm->status_lock);
1774
1775         list_for_each_entry(mapping, &bo_va->invalids, list) {
1776                 r = amdgpu_vm_bo_split_mapping(adev, exclusive,
1777                                                gtt_flags, pages_addr, vm,
1778                                                mapping, flags, nodes,
1779                                                &bo_va->last_pt_update);
1780                 if (r)
1781                         return r;
1782         }
1783
1784         if (trace_amdgpu_vm_bo_mapping_enabled()) {
1785                 list_for_each_entry(mapping, &bo_va->valids, list)
1786                         trace_amdgpu_vm_bo_mapping(mapping);
1787
1788                 list_for_each_entry(mapping, &bo_va->invalids, list)
1789                         trace_amdgpu_vm_bo_mapping(mapping);
1790         }
1791
1792         spin_lock(&vm->status_lock);
1793         list_splice_init(&bo_va->invalids, &bo_va->valids);
1794         list_del_init(&bo_va->base.vm_status);
1795         if (clear)
1796                 list_add(&bo_va->base.vm_status, &vm->cleared);
1797         spin_unlock(&vm->status_lock);
1798
1799         if (vm->use_cpu_for_update) {
1800                 /* Flush HDP */
1801                 mb();
1802                 amdgpu_gart_flush_gpu_tlb(adev, 0);
1803         }
1804
1805         return 0;
1806 }
1807
1808 /**
1809  * amdgpu_vm_update_prt_state - update the global PRT state
1810  */
1811 static void amdgpu_vm_update_prt_state(struct amdgpu_device *adev)
1812 {
1813         unsigned long flags;
1814         bool enable;
1815
1816         spin_lock_irqsave(&adev->vm_manager.prt_lock, flags);
1817         enable = !!atomic_read(&adev->vm_manager.num_prt_users);
1818         adev->gart.gart_funcs->set_prt(adev, enable);
1819         spin_unlock_irqrestore(&adev->vm_manager.prt_lock, flags);
1820 }
1821
1822 /**
1823  * amdgpu_vm_prt_get - add a PRT user
1824  */
1825 static void amdgpu_vm_prt_get(struct amdgpu_device *adev)
1826 {
1827         if (!adev->gart.gart_funcs->set_prt)
1828                 return;
1829
1830         if (atomic_inc_return(&adev->vm_manager.num_prt_users) == 1)
1831                 amdgpu_vm_update_prt_state(adev);
1832 }
1833
1834 /**
1835  * amdgpu_vm_prt_put - drop a PRT user
1836  */
1837 static void amdgpu_vm_prt_put(struct amdgpu_device *adev)
1838 {
1839         if (atomic_dec_return(&adev->vm_manager.num_prt_users) == 0)
1840                 amdgpu_vm_update_prt_state(adev);
1841 }
1842
1843 /**
1844  * amdgpu_vm_prt_cb - callback for updating the PRT status
1845  */
1846 static void amdgpu_vm_prt_cb(struct dma_fence *fence, struct dma_fence_cb *_cb)
1847 {
1848         struct amdgpu_prt_cb *cb = container_of(_cb, struct amdgpu_prt_cb, cb);
1849
1850         amdgpu_vm_prt_put(cb->adev);
1851         kfree(cb);
1852 }
1853
1854 /**
1855  * amdgpu_vm_add_prt_cb - add callback for updating the PRT status
1856  */
1857 static void amdgpu_vm_add_prt_cb(struct amdgpu_device *adev,
1858                                  struct dma_fence *fence)
1859 {
1860         struct amdgpu_prt_cb *cb;
1861
1862         if (!adev->gart.gart_funcs->set_prt)
1863                 return;
1864
1865         cb = kmalloc(sizeof(struct amdgpu_prt_cb), GFP_KERNEL);
1866         if (!cb) {
1867                 /* Last resort when we are OOM */
1868                 if (fence)
1869                         dma_fence_wait(fence, false);
1870
1871                 amdgpu_vm_prt_put(adev);
1872         } else {
1873                 cb->adev = adev;
1874                 if (!fence || dma_fence_add_callback(fence, &cb->cb,
1875                                                      amdgpu_vm_prt_cb))
1876                         amdgpu_vm_prt_cb(fence, &cb->cb);
1877         }
1878 }
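
/* Typical PRT lifetime as used further below: amdgpu_vm_bo_map() calls
 * amdgpu_vm_prt_get() for a mapping carrying AMDGPU_PTE_PRT, enabling PRT in
 * hardware on the 0 -> 1 transition of num_prt_users.  When the mapping is
 * freed, amdgpu_vm_free_mapping() hands the unmap fence to
 * amdgpu_vm_add_prt_cb(), so the counter only drops (and PRT is switched off
 * on the 1 -> 0 transition) once the GPU has finished with the old tables.
 */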
1879
1880 /**
1881  * amdgpu_vm_free_mapping - free a mapping
1882  *
1883  * @adev: amdgpu_device pointer
1884  * @vm: requested vm
1885  * @mapping: mapping to be freed
1886  * @fence: fence of the unmap operation
1887  *
1888  * Free a mapping and make sure we decrease the PRT usage count if applicable.
1889  */
1890 static void amdgpu_vm_free_mapping(struct amdgpu_device *adev,
1891                                    struct amdgpu_vm *vm,
1892                                    struct amdgpu_bo_va_mapping *mapping,
1893                                    struct dma_fence *fence)
1894 {
1895         if (mapping->flags & AMDGPU_PTE_PRT)
1896                 amdgpu_vm_add_prt_cb(adev, fence);
1897         kfree(mapping);
1898 }
1899
1900 /**
1901  * amdgpu_vm_prt_fini - finish all prt mappings
1902  *
1903  * @adev: amdgpu_device pointer
1904  * @vm: requested vm
1905  *
1906  * Register a cleanup callback to disable PRT support after VM dies.
1907  */
1908 static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
1909 {
1910         struct reservation_object *resv = vm->root.bo->tbo.resv;
1911         struct dma_fence *excl, **shared;
1912         unsigned i, shared_count;
1913         int r;
1914
1915         r = reservation_object_get_fences_rcu(resv, &excl,
1916                                               &shared_count, &shared);
1917         if (r) {
1918                 /* Not enough memory to grab the fence list; as a last resort,
1919                  * block until all the fences complete.
1920                  */
1921                 reservation_object_wait_timeout_rcu(resv, true, false,
1922                                                     MAX_SCHEDULE_TIMEOUT);
1923                 return;
1924         }
1925
1926         /* Add a callback for each fence in the reservation object */
1927         amdgpu_vm_prt_get(adev);
1928         amdgpu_vm_add_prt_cb(adev, excl);
1929
1930         for (i = 0; i < shared_count; ++i) {
1931                 amdgpu_vm_prt_get(adev);
1932                 amdgpu_vm_add_prt_cb(adev, shared[i]);
1933         }
1934
1935         kfree(shared);
1936 }
1937
1938 /**
1939  * amdgpu_vm_clear_freed - clear freed BOs in the PT
1940  *
1941  * @adev: amdgpu_device pointer
1942  * @vm: requested vm
1943  * @fence: optional resulting fence (unchanged if no work needed to be done
1944  * or if an error occurred)
1945  *
1946  * Make sure all freed BOs are cleared in the PT.
1947  * Returns 0 for success.
1948  *
1949  * PTs have to be reserved and mutex must be locked!
1950  */
1951 int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
1952                           struct amdgpu_vm *vm,
1953                           struct dma_fence **fence)
1954 {
1955         struct amdgpu_bo_va_mapping *mapping;
1956         struct dma_fence *f = NULL;
1957         int r;
1958         uint64_t init_pte_value = 0;
1959
1960         while (!list_empty(&vm->freed)) {
1961                 mapping = list_first_entry(&vm->freed,
1962                         struct amdgpu_bo_va_mapping, list);
1963                 list_del(&mapping->list);
1964
1965                 if (vm->pte_support_ats)
1966                         init_pte_value = AMDGPU_PTE_SYSTEM;
1967
1968                 r = amdgpu_vm_bo_update_mapping(adev, NULL, 0, NULL, vm,
1969                                                 mapping->start, mapping->last,
1970                                                 init_pte_value, 0, &f);
1971                 amdgpu_vm_free_mapping(adev, vm, mapping, f);
1972                 if (r) {
1973                         dma_fence_put(f);
1974                         return r;
1975                 }
1976         }
1977
1978         if (fence && f) {
1979                 dma_fence_put(*fence);
1980                 *fence = f;
1981         } else {
1982                 dma_fence_put(f);
1983         }
1984
1985         return 0;
1986
1987 }
1988
1989 /**
1990  * amdgpu_vm_clear_moved - clear moved BOs in the PT
1991  *
1992  * @adev: amdgpu_device pointer
1993  * @vm: requested vm
1994  *
1995  * Make sure all moved BOs are cleared in the PT.
1996  * Returns 0 for success.
1997  *
1998  * PTs have to be reserved and mutex must be locked!
1999  */
2000 int amdgpu_vm_clear_moved(struct amdgpu_device *adev, struct amdgpu_vm *vm,
2001                             struct amdgpu_sync *sync)
2002 {
2003         struct amdgpu_bo_va *bo_va = NULL;
2004         int r = 0;
2005
2006         spin_lock(&vm->status_lock);
2007         while (!list_empty(&vm->moved)) {
2008                 bo_va = list_first_entry(&vm->moved,
2009                         struct amdgpu_bo_va, base.vm_status);
2010                 spin_unlock(&vm->status_lock);
2011
2012                 r = amdgpu_vm_bo_update(adev, bo_va, true);
2013                 if (r)
2014                         return r;
2015
2016                 spin_lock(&vm->status_lock);
2017         }
2018         spin_unlock(&vm->status_lock);
2019
2020         if (bo_va)
2021                 r = amdgpu_sync_fence(adev, sync, bo_va->last_pt_update);
2022
2023         return r;
2024 }
2025
2026 /**
2027  * amdgpu_vm_bo_add - add a bo to a specific vm
2028  *
2029  * @adev: amdgpu_device pointer
2030  * @vm: requested vm
2031  * @bo: amdgpu buffer object
2032  *
2033  * Add @bo into the requested vm.
2034  * Add @bo to the list of bos associated with the vm
2035  * Returns newly added bo_va or NULL for failure
2036  *
2037  * Object has to be reserved!
2038  */
2039 struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
2040                                       struct amdgpu_vm *vm,
2041                                       struct amdgpu_bo *bo)
2042 {
2043         struct amdgpu_bo_va *bo_va;
2044
2045         bo_va = kzalloc(sizeof(struct amdgpu_bo_va), GFP_KERNEL);
2046         if (bo_va == NULL) {
2047                 return NULL;
2048         }
2049         bo_va->base.vm = vm;
2050         bo_va->base.bo = bo;
2051         INIT_LIST_HEAD(&bo_va->base.bo_list);
2052         INIT_LIST_HEAD(&bo_va->base.vm_status);
2053
2054         bo_va->ref_count = 1;
2055         INIT_LIST_HEAD(&bo_va->valids);
2056         INIT_LIST_HEAD(&bo_va->invalids);
2057
2058         if (bo)
2059                 list_add_tail(&bo_va->base.bo_list, &bo->va);
2060
2061         return bo_va;
2062 }
2063
2064 /**
2065  * amdgpu_vm_bo_map - map bo inside a vm
2066  *
2067  * @adev: amdgpu_device pointer
2068  * @bo_va: bo_va to store the address
2069  * @saddr: where to map the BO
2070  * @offset: requested offset in the BO
2071  * @flags: attributes of pages (read/write/valid/etc.)
2072  *
2073  * Add a mapping of the BO at the specified addr into the VM.
2074  * Returns 0 for success, error for failure.
2075  *
2076  * Object has to be reserved and unreserved outside!
2077  */
2078 int amdgpu_vm_bo_map(struct amdgpu_device *adev,
2079                      struct amdgpu_bo_va *bo_va,
2080                      uint64_t saddr, uint64_t offset,
2081                      uint64_t size, uint64_t flags)
2082 {
2083         struct amdgpu_bo_va_mapping *mapping, *tmp;
2084         struct amdgpu_bo *bo = bo_va->base.bo;
2085         struct amdgpu_vm *vm = bo_va->base.vm;
2086         uint64_t eaddr;
2087
2088         /* validate the parameters */
2089         if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
2090             size == 0 || size & AMDGPU_GPU_PAGE_MASK)
2091                 return -EINVAL;
2092
2093         /* make sure object fit at this offset */
2094         eaddr = saddr + size - 1;
2095         if (saddr >= eaddr ||
2096             (bo && offset + size > amdgpu_bo_size(bo)))
2097                 return -EINVAL;
2098
2099         saddr /= AMDGPU_GPU_PAGE_SIZE;
2100         eaddr /= AMDGPU_GPU_PAGE_SIZE;
2101
2102         tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
2103         if (tmp) {
2104                 /* bo and tmp overlap, invalid addr */
2105                 dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
2106                         "0x%010Lx-0x%010Lx\n", bo, saddr, eaddr,
2107                         tmp->start, tmp->last + 1);
2108                 return -EINVAL;
2109         }
2110
2111         mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
2112         if (!mapping)
2113                 return -ENOMEM;
2114
2115         INIT_LIST_HEAD(&mapping->list);
2116         mapping->start = saddr;
2117         mapping->last = eaddr;
2118         mapping->offset = offset;
2119         mapping->flags = flags;
2120
2121         list_add(&mapping->list, &bo_va->invalids);
2122         amdgpu_vm_it_insert(mapping, &vm->va);
2123
2124         if (flags & AMDGPU_PTE_PRT)
2125                 amdgpu_vm_prt_get(adev);
2126
2127         return 0;
2128 }
2129
2130 /**
2131  * amdgpu_vm_bo_replace_map - map bo inside a vm, replacing existing mappings
2132  *
2133  * @adev: amdgpu_device pointer
2134  * @bo_va: bo_va to store the address
2135  * @saddr: where to map the BO
2136  * @offset: requested offset in the BO
2137  * @flags: attributes of pages (read/write/valid/etc.)
2138  *
2139  * Add a mapping of the BO at the specified addr into the VM. Replace existing
2140  * mappings as we do so.
2141  * Returns 0 for success, error for failure.
2142  *
2143  * Object has to be reserved and unreserved outside!
2144  */
2145 int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
2146                              struct amdgpu_bo_va *bo_va,
2147                              uint64_t saddr, uint64_t offset,
2148                              uint64_t size, uint64_t flags)
2149 {
2150         struct amdgpu_bo_va_mapping *mapping;
2151         struct amdgpu_bo *bo = bo_va->base.bo;
2152         struct amdgpu_vm *vm = bo_va->base.vm;
2153         uint64_t eaddr;
2154         int r;
2155
2156         /* validate the parameters */
2157         if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
2158             size == 0 || size & AMDGPU_GPU_PAGE_MASK)
2159                 return -EINVAL;
2160
2161         /* make sure object fit at this offset */
2162         eaddr = saddr + size - 1;
2163         if (saddr >= eaddr ||
2164             (bo && offset + size > amdgpu_bo_size(bo)))
2165                 return -EINVAL;
2166
2167         /* Allocate all the needed memory */
2168         mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
2169         if (!mapping)
2170                 return -ENOMEM;
2171
2172         r = amdgpu_vm_bo_clear_mappings(adev, bo_va->base.vm, saddr, size);
2173         if (r) {
2174                 kfree(mapping);
2175                 return r;
2176         }
2177
2178         saddr /= AMDGPU_GPU_PAGE_SIZE;
2179         eaddr /= AMDGPU_GPU_PAGE_SIZE;
2180
2181         mapping->start = saddr;
2182         mapping->last = eaddr;
2183         mapping->offset = offset;
2184         mapping->flags = flags;
2185
2186         list_add(&mapping->list, &bo_va->invalids);
2187         amdgpu_vm_it_insert(mapping, &vm->va);
2188
2189         if (flags & AMDGPU_PTE_PRT)
2190                 amdgpu_vm_prt_get(adev);
2191
2192         return 0;
2193 }
2194
2195 /**
2196  * amdgpu_vm_bo_unmap - remove bo mapping from vm
2197  *
2198  * @adev: amdgpu_device pointer
2199  * @bo_va: bo_va to remove the address from
2200  * @saddr: where the BO is mapped
2201  *
2202  * Remove a mapping of the BO at the specified addr from the VM.
2203  * Returns 0 for success, error for failure.
2204  *
2205  * Object has to be reserved and unreserved outside!
2206  */
2207 int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
2208                        struct amdgpu_bo_va *bo_va,
2209                        uint64_t saddr)
2210 {
2211         struct amdgpu_bo_va_mapping *mapping;
2212         struct amdgpu_vm *vm = bo_va->base.vm;
2213         bool valid = true;
2214
2215         saddr /= AMDGPU_GPU_PAGE_SIZE;
2216
2217         list_for_each_entry(mapping, &bo_va->valids, list) {
2218                 if (mapping->start == saddr)
2219                         break;
2220         }
2221
2222         if (&mapping->list == &bo_va->valids) {
2223                 valid = false;
2224
2225                 list_for_each_entry(mapping, &bo_va->invalids, list) {
2226                         if (mapping->start == saddr)
2227                                 break;
2228                 }
2229
2230                 if (&mapping->list == &bo_va->invalids)
2231                         return -ENOENT;
2232         }
2233
2234         list_del(&mapping->list);
2235         amdgpu_vm_it_remove(mapping, &vm->va);
2236         trace_amdgpu_vm_bo_unmap(bo_va, mapping);
2237
2238         if (valid)
2239                 list_add(&mapping->list, &vm->freed);
2240         else
2241                 amdgpu_vm_free_mapping(adev, vm, mapping,
2242                                        bo_va->last_pt_update);
2243
2244         return 0;
2245 }
2246
2247 /**
2248  * amdgpu_vm_bo_clear_mappings - remove all mappings in a specific range
2249  *
2250  * @adev: amdgpu_device pointer
2251  * @vm: VM structure to use
2252  * @saddr: start of the range
2253  * @size: size of the range
2254  *
2255  * Remove all mappings in a range, split them as appropriate.
2256  * Returns 0 for success, error for failure.
2257  */
2258 int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
2259                                 struct amdgpu_vm *vm,
2260                                 uint64_t saddr, uint64_t size)
2261 {
2262         struct amdgpu_bo_va_mapping *before, *after, *tmp, *next;
2263         LIST_HEAD(removed);
2264         uint64_t eaddr;
2265
2266         eaddr = saddr + size - 1;
2267         saddr /= AMDGPU_GPU_PAGE_SIZE;
2268         eaddr /= AMDGPU_GPU_PAGE_SIZE;
2269
2270         /* Allocate all the needed memory */
2271         before = kzalloc(sizeof(*before), GFP_KERNEL);
2272         if (!before)
2273                 return -ENOMEM;
2274         INIT_LIST_HEAD(&before->list);
2275
2276         after = kzalloc(sizeof(*after), GFP_KERNEL);
2277         if (!after) {
2278                 kfree(before);
2279                 return -ENOMEM;
2280         }
2281         INIT_LIST_HEAD(&after->list);
2282
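        /* Worked example (addresses assumed): clearing GPU pages 0x140..0x17f
         * out of an existing mapping 0x100..0x1ff moves the old mapping to
         * vm->freed trimmed to 0x140..0x17f, while "before" keeps 0x100..0x13f
         * with the original offset and "after" keeps 0x180..0x1ff with the
         * offset advanced by 0x80 pages.
         */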
2283         /* Now gather all removed mappings */
2284         tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
2285         while (tmp) {
2286                 /* Remember mapping split at the start */
2287                 if (tmp->start < saddr) {
2288                         before->start = tmp->start;
2289                         before->last = saddr - 1;
2290                         before->offset = tmp->offset;
2291                         before->flags = tmp->flags;
2292                         list_add(&before->list, &tmp->list);
2293                 }
2294
2295                 /* Remember mapping split at the end */
2296                 if (tmp->last > eaddr) {
2297                         after->start = eaddr + 1;
2298                         after->last = tmp->last;
2299                         after->offset = tmp->offset;
2300                         after->offset += (after->start - tmp->start) << PAGE_SHIFT;
2301                         after->flags = tmp->flags;
2302                         list_add(&after->list, &tmp->list);
2303                 }
2304
2305                 list_del(&tmp->list);
2306                 list_add(&tmp->list, &removed);
2307
2308                 tmp = amdgpu_vm_it_iter_next(tmp, saddr, eaddr);
2309         }
2310
2311         /* And free them up */
2312         list_for_each_entry_safe(tmp, next, &removed, list) {
2313                 amdgpu_vm_it_remove(tmp, &vm->va);
2314                 list_del(&tmp->list);
2315
2316                 if (tmp->start < saddr)
2317                         tmp->start = saddr;
2318                 if (tmp->last > eaddr)
2319                         tmp->last = eaddr;
2320
2321                 list_add(&tmp->list, &vm->freed);
2322                 trace_amdgpu_vm_bo_unmap(NULL, tmp);
2323         }
2324
2325         /* Insert partial mapping before the range */
2326         if (!list_empty(&before->list)) {
2327                 amdgpu_vm_it_insert(before, &vm->va);
2328                 if (before->flags & AMDGPU_PTE_PRT)
2329                         amdgpu_vm_prt_get(adev);
2330         } else {
2331                 kfree(before);
2332         }
2333
2334         /* Insert partial mapping after the range */
2335         if (!list_empty(&after->list)) {
2336                 amdgpu_vm_it_insert(after, &vm->va);
2337                 if (after->flags & AMDGPU_PTE_PRT)
2338                         amdgpu_vm_prt_get(adev);
2339         } else {
2340                 kfree(after);
2341         }
2342
2343         return 0;
2344 }
2345
2346 /**
2347  * amdgpu_vm_bo_rmv - remove a bo from a specific vm
2348  *
2349  * @adev: amdgpu_device pointer
2350  * @bo_va: requested bo_va
2351  *
2352  * Remove @bo_va->bo from the requested vm.
2353  *
2354  * Object has to be reserved!
2355  */
2356 void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
2357                       struct amdgpu_bo_va *bo_va)
2358 {
2359         struct amdgpu_bo_va_mapping *mapping, *next;
2360         struct amdgpu_vm *vm = bo_va->base.vm;
2361
2362         list_del(&bo_va->base.bo_list);
2363
2364         spin_lock(&vm->status_lock);
2365         list_del(&bo_va->base.vm_status);
2366         spin_unlock(&vm->status_lock);
2367
2368         list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
2369                 list_del(&mapping->list);
2370                 amdgpu_vm_it_remove(mapping, &vm->va);
2371                 trace_amdgpu_vm_bo_unmap(bo_va, mapping);
2372                 list_add(&mapping->list, &vm->freed);
2373         }
2374         list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
2375                 list_del(&mapping->list);
2376                 amdgpu_vm_it_remove(mapping, &vm->va);
2377                 amdgpu_vm_free_mapping(adev, vm, mapping,
2378                                        bo_va->last_pt_update);
2379         }
2380
2381         dma_fence_put(bo_va->last_pt_update);
2382         kfree(bo_va);
2383 }
2384
2385 /**
2386  * amdgpu_vm_bo_invalidate - mark the bo as invalid
2387  *
2388  * @adev: amdgpu_device pointer
2390  * @bo: amdgpu buffer object
2391  *
2392  * Mark @bo as invalid.
2393  */
2394 void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
2395                              struct amdgpu_bo *bo)
2396 {
2397         struct amdgpu_vm_bo_base *bo_base;
2398
2399         list_for_each_entry(bo_base, &bo->va, bo_list) {
2400                 spin_lock(&bo_base->vm->status_lock);
2401                 if (list_empty(&bo_base->vm_status))
2402                         list_add(&bo_base->vm_status,
2403                                  &bo_base->vm->moved);
2404                 spin_unlock(&bo_base->vm->status_lock);
2405         }
2406 }
2407
2408 static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size)
2409 {
2410         /* Total bits covered by PD + PTs */
2411         unsigned bits = ilog2(vm_size) + 18;
2412
2413         /* Make sure the PD is 4K in size up to 8GB address space.
2414          * Above that, split equally between PD and PTs. */
2415         if (vm_size <= 8)
2416                 return (bits - 9);
2417         else
2418                 return ((bits + 3) / 2);
2419 }
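
/* Two example block sizes (vm_size values assumed): vm_size = 8 GB gives
 * bits = ilog2(8) + 18 = 21 and a block size of 21 - 9 = 12, keeping the PD
 * at 512 entries (4 KiB); vm_size = 256 GB gives bits = 26 and a block size
 * of (26 + 3) / 2 = 14, leaving 12 bits (4096 entries, a 32 KiB PD).
 */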
2420
2421 /**
2422  * amdgpu_vm_set_fragment_size - adjust fragment size in PTE
2423  *
2424  * @adev: amdgpu_device pointer
2425  * @fragment_size_default: the default fragment size if it's set auto
2426  */
2427 void amdgpu_vm_set_fragment_size(struct amdgpu_device *adev, uint32_t fragment_size_default)
2428 {
2429         if (amdgpu_vm_fragment_size == -1)
2430                 adev->vm_manager.fragment_size = fragment_size_default;
2431         else
2432                 adev->vm_manager.fragment_size = amdgpu_vm_fragment_size;
2433 }
2434
2435 /**
2436  * amdgpu_vm_adjust_size - adjust vm size, block size and fragment size
2437  *
2438  * @adev: amdgpu_device pointer
2439  * @vm_size: the default vm size if it's set auto
2440  */
2441 void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint64_t vm_size, uint32_t fragment_size_default)
2442 {
2443         /* adjust vm size first */
2444         if (amdgpu_vm_size == -1)
2445                 adev->vm_manager.vm_size = vm_size;
2446         else
2447                 adev->vm_manager.vm_size = amdgpu_vm_size;
2448
2449         /* block size depends on vm size */
2450         if (amdgpu_vm_block_size == -1)
2451                 adev->vm_manager.block_size =
2452                         amdgpu_vm_get_block_size(adev->vm_manager.vm_size);
2453         else
2454                 adev->vm_manager.block_size = amdgpu_vm_block_size;
2455
2456         amdgpu_vm_set_fragment_size(adev, fragment_size_default);
2457
2458         DRM_INFO("vm size is %llu GB, block size is %u-bit, fragment size is %u-bit\n",
2459                 adev->vm_manager.vm_size, adev->vm_manager.block_size,
2460                 adev->vm_manager.fragment_size);
2461 }
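
/* Example with the module parameters left at their -1 defaults and the
 * caller passing vm_size = 64 and fragment_size_default = 4: vm_size stays
 * 64 GB, the block size becomes amdgpu_vm_get_block_size(64) =
 * (ilog2(64) + 18 + 3) / 2 = 13 and the fragment size 4, matching the
 * DRM_INFO line above.
 */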
2462
2463 /**
2464  * amdgpu_vm_init - initialize a vm instance
2465  *
2466  * @adev: amdgpu_device pointer
2467  * @vm: requested vm
2468  * @vm_context: Indicates if it is a GFX or Compute context
2469  *
2470  * Init @vm fields.
2471  */
2472 int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
2473                    int vm_context)
2474 {
2475         const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE,
2476                 AMDGPU_VM_PTE_COUNT(adev) * 8);
2477         unsigned ring_instance;
2478         struct amdgpu_ring *ring;
2479         struct amd_sched_rq *rq;
2480         int r, i;
2481         u64 flags;
2482         uint64_t init_pde_value = 0;
2483
2484         vm->va = RB_ROOT;
2485         vm->client_id = atomic64_inc_return(&adev->vm_manager.client_counter);
2486         for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
2487                 vm->reserved_vmid[i] = NULL;
2488         spin_lock_init(&vm->status_lock);
2489         INIT_LIST_HEAD(&vm->moved);
2490         INIT_LIST_HEAD(&vm->cleared);
2491         INIT_LIST_HEAD(&vm->freed);
2492
2493         /* create scheduler entity for page table updates */
2494
2495         ring_instance = atomic_inc_return(&adev->vm_manager.vm_pte_next_ring);
2496         ring_instance %= adev->vm_manager.vm_pte_num_rings;
2497         ring = adev->vm_manager.vm_pte_rings[ring_instance];
2498         rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_KERNEL];
2499         r = amd_sched_entity_init(&ring->sched, &vm->entity,
2500                                   rq, amdgpu_sched_jobs);
2501         if (r)
2502                 return r;
2503
2504         vm->pte_support_ats = false;
2505
2506         if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE) {
2507                 vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
2508                                                 AMDGPU_VM_USE_CPU_FOR_COMPUTE);
2509
2510                 if (adev->asic_type == CHIP_RAVEN) {
2511                         vm->pte_support_ats = true;
2512                         init_pde_value = AMDGPU_PTE_SYSTEM | AMDGPU_PDE_PTE;
2513                 }
2514         } else
2515                 vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
2516                                                 AMDGPU_VM_USE_CPU_FOR_GFX);
2517         DRM_DEBUG_DRIVER("VM update mode is %s\n",
2518                          vm->use_cpu_for_update ? "CPU" : "SDMA");
2519         WARN_ONCE((vm->use_cpu_for_update && !amdgpu_vm_is_large_bar(adev)),
2520                   "CPU update of VM recommended only for large BAR system\n");
2521         vm->last_dir_update = NULL;
2522
2523         flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
2524                         AMDGPU_GEM_CREATE_VRAM_CLEARED;
2525         if (vm->use_cpu_for_update)
2526                 flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
2527         else
2528                 flags |= (AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
2529                                 AMDGPU_GEM_CREATE_SHADOW);
2530
2531         r = amdgpu_bo_create(adev, amdgpu_vm_bo_size(adev, 0), align, true,
2532                              AMDGPU_GEM_DOMAIN_VRAM,
2533                              flags,
2534                              NULL, NULL, init_pde_value, &vm->root.bo);
2535         if (r)
2536                 goto error_free_sched_entity;
2537
2538         r = amdgpu_bo_reserve(vm->root.bo, false);
2539         if (r)
2540                 goto error_free_root;
2541
2542         vm->last_eviction_counter = atomic64_read(&adev->num_evictions);
2543
2544         if (vm->use_cpu_for_update) {
2545                 r = amdgpu_bo_kmap(vm->root.bo, NULL);
2546                 if (r)
2547                         goto error_free_root;
2548         }
2549
2550         amdgpu_bo_unreserve(vm->root.bo);
2551
2552         return 0;
2553
2554 error_free_root:
2555         amdgpu_bo_unref(&vm->root.bo->shadow);
2556         amdgpu_bo_unref(&vm->root.bo);
2557         vm->root.bo = NULL;
2558
2559 error_free_sched_entity:
2560         amd_sched_entity_fini(&ring->sched, &vm->entity);
2561
2562         return r;
2563 }
2564
2565 /**
2566  * amdgpu_vm_free_levels - free PD/PT levels
2567  *
2568  * @level: PD/PT starting level to free
2569  *
2570  * Free the page directory or page table level and all sub levels.
2571  */
2572 static void amdgpu_vm_free_levels(struct amdgpu_vm_pt *level)
2573 {
2574         unsigned i;
2575
2576         if (level->bo) {
2577                 amdgpu_bo_unref(&level->bo->shadow);
2578                 amdgpu_bo_unref(&level->bo);
2579         }
2580
2581         if (level->entries)
2582                 for (i = 0; i <= level->last_entry_used; i++)
2583                         amdgpu_vm_free_levels(&level->entries[i]);
2584
2585         kvfree(level->entries);
2586 }
2587
2588 /**
2589  * amdgpu_vm_fini - tear down a vm instance
2590  *
2591  * @adev: amdgpu_device pointer
2592  * @vm: requested vm
2593  *
2594  * Tear down @vm.
2595  * Unbind the VM and remove all bos from the vm bo list
2596  */
2597 void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
2598 {
2599         struct amdgpu_bo_va_mapping *mapping, *tmp;
2600         bool prt_fini_needed = !!adev->gart.gart_funcs->set_prt;
2601         int i;
2602
2603         amd_sched_entity_fini(vm->entity.sched, &vm->entity);
2604
2605         if (!RB_EMPTY_ROOT(&vm->va)) {
2606                 dev_err(adev->dev, "still active bo inside vm\n");
2607         }
2608         rbtree_postorder_for_each_entry_safe(mapping, tmp, &vm->va, rb) {
2609                 list_del(&mapping->list);
2610                 amdgpu_vm_it_remove(mapping, &vm->va);
2611                 kfree(mapping);
2612         }
2613         list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
2614                 if (mapping->flags & AMDGPU_PTE_PRT && prt_fini_needed) {
2615                         amdgpu_vm_prt_fini(adev, vm);
2616                         prt_fini_needed = false;
2617                 }
2618
2619                 list_del(&mapping->list);
2620                 amdgpu_vm_free_mapping(adev, vm, mapping, NULL);
2621         }
2622
2623         amdgpu_vm_free_levels(&vm->root);
2624         dma_fence_put(vm->last_dir_update);
2625         for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
2626                 amdgpu_vm_free_reserved_vmid(adev, vm, i);
2627 }
2628
2629 /**
2630  * amdgpu_vm_manager_init - init the VM manager
2631  *
2632  * @adev: amdgpu_device pointer
2633  *
2634  * Initialize the VM manager structures
2635  */
2636 void amdgpu_vm_manager_init(struct amdgpu_device *adev)
2637 {
2638         unsigned i, j;
2639
2640         for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
2641                 struct amdgpu_vm_id_manager *id_mgr =
2642                         &adev->vm_manager.id_mgr[i];
2643
2644                 mutex_init(&id_mgr->lock);
2645                 INIT_LIST_HEAD(&id_mgr->ids_lru);
2646                 atomic_set(&id_mgr->reserved_vmid_num, 0);
2647
2648                 /* skip over VMID 0, since it is the system VM */
2649                 for (j = 1; j < id_mgr->num_ids; ++j) {
2650                         amdgpu_vm_reset_id(adev, i, j);
2651                         amdgpu_sync_create(&id_mgr->ids[j].active);
2652                         list_add_tail(&id_mgr->ids[j].list, &id_mgr->ids_lru);
2653                 }
2654         }
2655
2656         adev->vm_manager.fence_context =
2657                 dma_fence_context_alloc(AMDGPU_MAX_RINGS);
2658         for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
2659                 adev->vm_manager.seqno[i] = 0;
2660
2661         atomic_set(&adev->vm_manager.vm_pte_next_ring, 0);
2662         atomic64_set(&adev->vm_manager.client_counter, 0);
2663         spin_lock_init(&adev->vm_manager.prt_lock);
2664         atomic_set(&adev->vm_manager.num_prt_users, 0);
2665
2666         /* Unless overridden by the user, compute VM page tables are updated by
2667          * the CPU only on large BAR systems by default
2668          */
2669 #ifdef CONFIG_X86_64
2670         if (amdgpu_vm_update_mode == -1) {
2671                 if (amdgpu_vm_is_large_bar(adev))
2672                         adev->vm_manager.vm_update_mode =
2673                                 AMDGPU_VM_USE_CPU_FOR_COMPUTE;
2674                 else
2675                         adev->vm_manager.vm_update_mode = 0;
2676         } else
2677                 adev->vm_manager.vm_update_mode = amdgpu_vm_update_mode;
2678 #else
2679         adev->vm_manager.vm_update_mode = 0;
2680 #endif
2681
2682 }
2683
2684 /**
2685  * amdgpu_vm_manager_fini - cleanup VM manager
2686  *
2687  * @adev: amdgpu_device pointer
2688  *
2689  * Cleanup the VM manager and free resources.
2690  */
2691 void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
2692 {
2693         unsigned i, j;
2694
2695         for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
2696                 struct amdgpu_vm_id_manager *id_mgr =
2697                         &adev->vm_manager.id_mgr[i];
2698
2699                 mutex_destroy(&id_mgr->lock);
2700                 for (j = 0; j < AMDGPU_NUM_VM; ++j) {
2701                         struct amdgpu_vm_id *id = &id_mgr->ids[j];
2702
2703                         amdgpu_sync_free(&id->active);
2704                         dma_fence_put(id->flushed_updates);
2705                         dma_fence_put(id->last_flush);
2706                 }
2707         }
2708 }
2709
2710 int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
2711 {
2712         union drm_amdgpu_vm *args = data;
2713         struct amdgpu_device *adev = dev->dev_private;
2714         struct amdgpu_fpriv *fpriv = filp->driver_priv;
2715         int r;
2716
2717         switch (args->in.op) {
2718         case AMDGPU_VM_OP_RESERVE_VMID:
2719                 /* currently, we only need to reserve a VMID from the gfxhub */
2720                 r = amdgpu_vm_alloc_reserved_vmid(adev, &fpriv->vm,
2721                                                   AMDGPU_GFXHUB);
2722                 if (r)
2723                         return r;
2724                 break;
2725         case AMDGPU_VM_OP_UNRESERVE_VMID:
2726                 amdgpu_vm_free_reserved_vmid(adev, &fpriv->vm, AMDGPU_GFXHUB);
2727                 break;
2728         default:
2729                 return -EINVAL;
2730         }
2731
2732         return 0;
2733 }