/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/dma-fence-array.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"

/*
 * GPUVM
 * GPUVM is similar to the legacy gart on older asics, however
 * rather than there being a single global gart table
 * for the entire GPU, there are multiple VM page tables active
 * at any given time.  The VM page tables can contain a mix of
 * vram pages and system memory pages, and system memory pages
 * can be mapped as snooped (cached system pages) or unsnooped
 * (uncached system pages).
 * Each VM has an ID associated with it and there is a page table
 * associated with each VMID.  When executing a command buffer,
 * the kernel tells the ring what VMID to use for that command
 * buffer.  VMIDs are allocated dynamically as commands are submitted.
 * The userspace drivers maintain their own address space and the kernel
 * sets up their page tables accordingly when they submit their
 * command buffers and a VMID is assigned.
 * Cayman/Trinity support up to 8 active VMs at any given time;
 * SI supports 16.
 */
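
/*
 * A quick worked example of the resulting layout (assuming the common
 * block size of 9, i.e. amdgpu_vm_block_size == 9, and 4KB GPU pages):
 * each leaf page table then holds 1 << 9 = 512 PTEs and covers 2MB of
 * address space, while every directory level above it consumes another
 * 9 bits of the virtual page number.
 */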

/* Local structure. Encapsulate some VM table update parameters to reduce
 * the number of function parameters
 */
struct amdgpu_pte_update_params {
        /* amdgpu device we do this update for */
        struct amdgpu_device *adev;
        /* optional amdgpu_vm we do this update for */
        struct amdgpu_vm *vm;
        /* address where to copy page table entries from */
        uint64_t src;
        /* indirect buffer to fill with commands */
        struct amdgpu_ib *ib;
        /* Function which actually does the update */
        void (*func)(struct amdgpu_pte_update_params *params, uint64_t pe,
                     uint64_t addr, unsigned count, uint32_t incr,
                     uint64_t flags);
        /* indicate update pt or its shadow */
        bool shadow;
};

/* Helper to disable partial resident texture feature from a fence callback */
struct amdgpu_prt_cb {
        struct amdgpu_device *adev;
        struct dma_fence_cb cb;
};

/**
 * amdgpu_vm_num_entries - return the number of entries in a PD/PT
 *
 * @adev: amdgpu_device pointer
 * @level: level of the PD/PT in the hierarchy
 *
 * Calculate the number of entries in a page directory or page table.
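 * With a block size of 9, for example, every level in between holds
 * 1 << 9 = 512 entries, while the root is sized from max_pfn.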
 */
static unsigned amdgpu_vm_num_entries(struct amdgpu_device *adev,
                                      unsigned level)
{
        if (level == 0)
                /* For the root directory */
                return adev->vm_manager.max_pfn >>
                        (amdgpu_vm_block_size * adev->vm_manager.num_level);
        else if (level == adev->vm_manager.num_level)
                /* For the page tables on the leaves */
                return AMDGPU_VM_PTE_COUNT;
        else
                /* Everything in between */
                return 1 << amdgpu_vm_block_size;
}

/**
 * amdgpu_vm_bo_size - returns the size of the BOs in bytes
 *
 * @adev: amdgpu_device pointer
 * @level: level of the PD/PT in the hierarchy
 *
 * Calculate the size of the BO for a page directory or page table in bytes.
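 * Each entry is eight bytes, so a level with 512 entries, for example,
 * needs a 512 * 8 = 4096 byte BO (aligned to the GPU page size).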
 */
static unsigned amdgpu_vm_bo_size(struct amdgpu_device *adev, unsigned level)
{
        return AMDGPU_GPU_PAGE_ALIGN(amdgpu_vm_num_entries(adev, level) * 8);
}

/**
 * amdgpu_vm_get_pd_bo - add the VM PD to a validation list
 *
 * @vm: vm providing the BOs
 * @validated: head of validation list
 * @entry: entry to add
 *
 * Add the page directory to the list of BOs to
 * validate for command submission.
 */
void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
                         struct list_head *validated,
                         struct amdgpu_bo_list_entry *entry)
{
        entry->robj = vm->root.bo;
        entry->priority = 0;
        entry->tv.bo = &entry->robj->tbo;
        entry->tv.shared = true;
        entry->user_pages = NULL;
        list_add(&entry->tv.head, validated);
}

/**
 * amdgpu_vm_validate_level - validate a single page table level
 *
 * @parent: parent page table level
 * @validate: callback to do the validation
 * @param: parameter for the validation callback
 *
 * Validate the page table BOs on command submission if necessary.
 */
static int amdgpu_vm_validate_level(struct amdgpu_vm_pt *parent,
                                    int (*validate)(void *, struct amdgpu_bo *),
                                    void *param)
{
        unsigned i;
        int r;

        if (!parent->entries)
                return 0;

        for (i = 0; i <= parent->last_entry_used; ++i) {
                struct amdgpu_vm_pt *entry = &parent->entries[i];

                if (!entry->bo)
                        continue;

                r = validate(param, entry->bo);
                if (r)
                        return r;

                /*
                 * Recurse into the sub directory. This is harmless because we
                 * have only a maximum of 5 layers.
                 */
                r = amdgpu_vm_validate_level(entry, validate, param);
                if (r)
                        return r;
        }

        return 0;
}

/**
 * amdgpu_vm_validate_pt_bos - validate the page table BOs
 *
 * @adev: amdgpu device pointer
 * @vm: vm providing the BOs
 * @validate: callback to do the validation
 * @param: parameter for the validation callback
 *
 * Validate the page table BOs on command submission if necessary.
 */
int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
                              int (*validate)(void *p, struct amdgpu_bo *bo),
                              void *param)
{
        uint64_t num_evictions;

        /* We only need to validate the page tables
         * if they aren't already valid.
         */
        num_evictions = atomic64_read(&adev->num_evictions);
        if (num_evictions == vm->last_eviction_counter)
                return 0;

        return amdgpu_vm_validate_level(&vm->root, validate, param);
}

/**
 * amdgpu_vm_move_level_in_lru - move one level of PT BOs to the LRU tail
 *
 * @parent: parent page table level
 *
 * Move the PT BOs to the tail of the LRU.
 */
static void amdgpu_vm_move_level_in_lru(struct amdgpu_vm_pt *parent)
{
        unsigned i;

        if (!parent->entries)
                return;

        for (i = 0; i <= parent->last_entry_used; ++i) {
                struct amdgpu_vm_pt *entry = &parent->entries[i];

                if (!entry->bo)
                        continue;

                ttm_bo_move_to_lru_tail(&entry->bo->tbo);
                amdgpu_vm_move_level_in_lru(entry);
        }
}

/**
 * amdgpu_vm_move_pt_bos_in_lru - move the PT BOs to the LRU tail
 *
 * @adev: amdgpu device instance
 * @vm: vm providing the BOs
 *
 * Move the PT BOs to the tail of the LRU.
 */
void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
                                  struct amdgpu_vm *vm)
{
        struct ttm_bo_global *glob = adev->mman.bdev.glob;

        spin_lock(&glob->lru_lock);
        amdgpu_vm_move_level_in_lru(&vm->root);
        spin_unlock(&glob->lru_lock);
}

/**
 * amdgpu_vm_alloc_levels - allocate the PD/PT levels
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @parent: parent PT level to fill in
 * @saddr: start of the address range
 * @eaddr: end of the address range
 * @level: level of @parent in the hierarchy
 *
 * Make sure the page directories and page tables are allocated
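 * (e.g. with num_level == 2 and a block size of 9, each root directory
 * entry covers 1 << 18 GPU pages of address space).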
 */
static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
                                  struct amdgpu_vm *vm,
                                  struct amdgpu_vm_pt *parent,
                                  uint64_t saddr, uint64_t eaddr,
                                  unsigned level)
{
        unsigned shift = (adev->vm_manager.num_level - level) *
                amdgpu_vm_block_size;
        unsigned pt_idx, from, to;
        int r;

        if (!parent->entries) {
                unsigned num_entries = amdgpu_vm_num_entries(adev, level);

                parent->entries = drm_calloc_large(num_entries,
                                                   sizeof(struct amdgpu_vm_pt));
                if (!parent->entries)
                        return -ENOMEM;
                memset(parent->entries, 0, num_entries *
                       sizeof(struct amdgpu_vm_pt));
        }

        from = saddr >> shift;
        to = eaddr >> shift;
        if (from >= amdgpu_vm_num_entries(adev, level) ||
            to >= amdgpu_vm_num_entries(adev, level))
                return -EINVAL;

        if (to > parent->last_entry_used)
                parent->last_entry_used = to;

        ++level;
        saddr = saddr & ((1 << shift) - 1);
        eaddr = eaddr & ((1 << shift) - 1);

        /* walk over the address space and allocate the page tables */
        for (pt_idx = from; pt_idx <= to; ++pt_idx) {
                struct reservation_object *resv = vm->root.bo->tbo.resv;
                struct amdgpu_vm_pt *entry = &parent->entries[pt_idx];
                struct amdgpu_bo *pt;

                if (!entry->bo) {
                        r = amdgpu_bo_create(adev,
                                             amdgpu_vm_bo_size(adev, level),
                                             AMDGPU_GPU_PAGE_SIZE, true,
                                             AMDGPU_GEM_DOMAIN_VRAM,
                                             AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
                                             AMDGPU_GEM_CREATE_SHADOW |
                                             AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
                                             AMDGPU_GEM_CREATE_VRAM_CLEARED,
                                             NULL, resv, &pt);
                        if (r)
                                return r;

                        /* Keep a reference to the root directory to avoid
                         * freeing them up in the wrong order.
                         */
                        pt->parent = amdgpu_bo_ref(vm->root.bo);

                        entry->bo = pt;
                        entry->addr = 0;
                }

                if (level < adev->vm_manager.num_level) {
                        uint64_t sub_saddr = (pt_idx == from) ? saddr : 0;
                        uint64_t sub_eaddr = (pt_idx == to) ? eaddr :
                                ((1 << shift) - 1);
                        r = amdgpu_vm_alloc_levels(adev, vm, entry, sub_saddr,
                                                   sub_eaddr, level);
                        if (r)
                                return r;
                }
        }

        return 0;
}

/**
 * amdgpu_vm_alloc_pts - Allocate page tables.
 *
 * @adev: amdgpu_device pointer
 * @vm: VM to allocate page tables for
 * @saddr: Start address which needs to be allocated
 * @size: Size from start address we need.
 *
 * Make sure the page tables are allocated.
 */
int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
                        struct amdgpu_vm *vm,
                        uint64_t saddr, uint64_t size)
{
        uint64_t last_pfn;
        uint64_t eaddr;

        /* validate the parameters */
        if (saddr & AMDGPU_GPU_PAGE_MASK || size & AMDGPU_GPU_PAGE_MASK)
                return -EINVAL;

        eaddr = saddr + size - 1;
        last_pfn = eaddr / AMDGPU_GPU_PAGE_SIZE;
        if (last_pfn >= adev->vm_manager.max_pfn) {
                dev_err(adev->dev, "va above limit (0x%08llX >= 0x%08llX)\n",
                        last_pfn, adev->vm_manager.max_pfn);
                return -EINVAL;
        }

        saddr /= AMDGPU_GPU_PAGE_SIZE;
        eaddr /= AMDGPU_GPU_PAGE_SIZE;

        return amdgpu_vm_alloc_levels(adev, vm, &vm->root, saddr, eaddr, 0);
}

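/* Check if a GPU reset happened since @id was last used, i.e. whether
 * the cached VMID state is stale.
 */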
static bool amdgpu_vm_is_gpu_reset(struct amdgpu_device *adev,
                                   struct amdgpu_vm_id *id)
{
        return id->current_gpu_reset_count !=
                atomic_read(&adev->gpu_reset_counter);
}

/**
 * amdgpu_vm_grab_id - allocate the next free VMID
 *
 * @vm: vm to allocate id for
 * @ring: ring we want to submit job to
 * @sync: sync object where we add dependencies
 * @fence: fence protecting ID from reuse
 * @job: job which will use the VMID
 *
 * Allocate an id for the vm, adding fences to the sync obj as necessary.
 */
int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
                      struct amdgpu_sync *sync, struct dma_fence *fence,
                      struct amdgpu_job *job)
{
        struct amdgpu_device *adev = ring->adev;
        uint64_t fence_context = adev->fence_context + ring->idx;
        struct dma_fence *updates = sync->last_vm_update;
        struct amdgpu_vm_id *id, *idle;
        struct dma_fence **fences;
        unsigned i;
        int r = 0;

        fences = kmalloc_array(adev->vm_manager.num_ids, sizeof(void *),
                               GFP_KERNEL);
        if (!fences)
                return -ENOMEM;

        mutex_lock(&adev->vm_manager.lock);

        /* Check if we have an idle VMID */
        i = 0;
        list_for_each_entry(idle, &adev->vm_manager.ids_lru, list) {
                fences[i] = amdgpu_sync_peek_fence(&idle->active, ring);
                if (!fences[i])
                        break;
                ++i;
        }

        /* If we can't find an idle VMID to use, wait till one becomes available */
        if (&idle->list == &adev->vm_manager.ids_lru) {
                u64 fence_context = adev->vm_manager.fence_context + ring->idx;
                unsigned seqno = ++adev->vm_manager.seqno[ring->idx];
                struct dma_fence_array *array;
                unsigned j;

                for (j = 0; j < i; ++j)
                        dma_fence_get(fences[j]);

                array = dma_fence_array_create(i, fences, fence_context,
                                               seqno, true);
                if (!array) {
                        for (j = 0; j < i; ++j)
                                dma_fence_put(fences[j]);
                        kfree(fences);
                        r = -ENOMEM;
                        goto error;
                }

                r = amdgpu_sync_fence(ring->adev, sync, &array->base);
                dma_fence_put(&array->base);
                if (r)
                        goto error;

                mutex_unlock(&adev->vm_manager.lock);
                return 0;
        }
        kfree(fences);

        job->vm_needs_flush = true;
        /* Check if we can use a VMID already assigned to this VM */
        i = ring->idx;
        do {
                struct dma_fence *flushed;

                id = vm->ids[i++];
                if (i == AMDGPU_MAX_RINGS)
                        i = 0;

                /* Check all the prerequisites to using this VMID */
                if (!id)
                        continue;
                if (amdgpu_vm_is_gpu_reset(adev, id))
                        continue;

                if (atomic64_read(&id->owner) != vm->client_id)
                        continue;

                if (job->vm_pd_addr != id->pd_gpu_addr)
                        continue;

                if (!id->last_flush)
                        continue;

                if (id->last_flush->context != fence_context &&
                    !dma_fence_is_signaled(id->last_flush))
                        continue;

                flushed = id->flushed_updates;
                if (updates &&
                    (!flushed || dma_fence_is_later(updates, flushed)))
                        continue;

                /* Good we can use this VMID. Remember this submission as
                 * user of the VMID.
                 */
                r = amdgpu_sync_fence(ring->adev, &id->active, fence);
                if (r)
                        goto error;

                id->current_gpu_reset_count = atomic_read(&adev->gpu_reset_counter);
                list_move_tail(&id->list, &adev->vm_manager.ids_lru);
                vm->ids[ring->idx] = id;

                job->vm_id = id - adev->vm_manager.ids;
                job->vm_needs_flush = false;
                trace_amdgpu_vm_grab_id(vm, ring->idx, job);

                mutex_unlock(&adev->vm_manager.lock);
                return 0;
        } while (i != ring->idx);

        /* Still no ID to use? Then use the idle one found earlier */
        id = idle;

        /* Remember this submission as user of the VMID */
        r = amdgpu_sync_fence(ring->adev, &id->active, fence);
        if (r)
                goto error;

        dma_fence_put(id->first);
        id->first = dma_fence_get(fence);

        dma_fence_put(id->last_flush);
        id->last_flush = NULL;

        dma_fence_put(id->flushed_updates);
        id->flushed_updates = dma_fence_get(updates);

        id->pd_gpu_addr = job->vm_pd_addr;
        id->current_gpu_reset_count = atomic_read(&adev->gpu_reset_counter);
        list_move_tail(&id->list, &adev->vm_manager.ids_lru);
        atomic64_set(&id->owner, vm->client_id);
        vm->ids[ring->idx] = id;

        job->vm_id = id - adev->vm_manager.ids;
        trace_amdgpu_vm_grab_id(vm, ring->idx, job);

error:
        mutex_unlock(&adev->vm_manager.lock);
        return r;
}

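/* Check whether @ring is a compute ring that still needs the compute VM
 * bug workaround: gfx8 is fixed in MEC firmware version 673 and later,
 * while gfx7 has no firmware workaround at all.
 */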
static bool amdgpu_vm_ring_has_compute_vm_bug(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        const struct amdgpu_ip_block *ip_block;

        if (ring->funcs->type != AMDGPU_RING_TYPE_COMPUTE)
                /* only compute rings */
                return false;

        ip_block = amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
        if (!ip_block)
                return false;

        if (ip_block->version->major <= 7) {
                /* gfx7 has no workaround */
                return true;
        } else if (ip_block->version->major == 8) {
                if (adev->gfx.mec_fw_version >= 673)
                        /* gfx8 is fixed in MEC firmware 673 */
                        return false;
                else
                        return true;
        }
        return false;
}

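/* Apply the ASIC specific MC address adjustment, if the chip provides
 * one, before the address is written into a PDE/PTE.
 */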
static u64 amdgpu_vm_adjust_mc_addr(struct amdgpu_device *adev, u64 mc_addr)
{
        u64 addr = mc_addr;

        if (adev->mc.mc_funcs && adev->mc.mc_funcs->adjust_mc_addr)
                addr = adev->mc.mc_funcs->adjust_mc_addr(adev, addr);

        return addr;
}

/**
 * amdgpu_vm_flush - hardware flush the vm
 *
 * @ring: ring to use for flush
 * @job: job carrying the VMID and page directory address
 *
 * Emit a VM flush when it is necessary.
 */
int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job)
{
        struct amdgpu_device *adev = ring->adev;
        struct amdgpu_vm_id *id = &adev->vm_manager.ids[job->vm_id];
        bool gds_switch_needed = ring->funcs->emit_gds_switch && (
                id->gds_base != job->gds_base ||
                id->gds_size != job->gds_size ||
                id->gws_base != job->gws_base ||
                id->gws_size != job->gws_size ||
                id->oa_base != job->oa_base ||
                id->oa_size != job->oa_size);
        int r;

        if (job->vm_needs_flush || gds_switch_needed ||
                amdgpu_vm_is_gpu_reset(adev, id) ||
                amdgpu_vm_ring_has_compute_vm_bug(ring)) {
                unsigned patch_offset = 0;

                if (ring->funcs->init_cond_exec)
                        patch_offset = amdgpu_ring_init_cond_exec(ring);

                if (ring->funcs->emit_pipeline_sync &&
                        (job->vm_needs_flush || gds_switch_needed ||
                        amdgpu_vm_ring_has_compute_vm_bug(ring)))
                        amdgpu_ring_emit_pipeline_sync(ring);

                if (ring->funcs->emit_vm_flush && (job->vm_needs_flush ||
                        amdgpu_vm_is_gpu_reset(adev, id))) {
                        struct dma_fence *fence;
                        u64 pd_addr = amdgpu_vm_adjust_mc_addr(adev, job->vm_pd_addr);

                        trace_amdgpu_vm_flush(pd_addr, ring->idx, job->vm_id);
                        amdgpu_ring_emit_vm_flush(ring, job->vm_id, pd_addr);

                        r = amdgpu_fence_emit(ring, &fence);
                        if (r)
                                return r;

                        mutex_lock(&adev->vm_manager.lock);
                        dma_fence_put(id->last_flush);
                        id->last_flush = fence;
                        mutex_unlock(&adev->vm_manager.lock);
                }

                if (gds_switch_needed) {
                        id->gds_base = job->gds_base;
                        id->gds_size = job->gds_size;
                        id->gws_base = job->gws_base;
                        id->gws_size = job->gws_size;
                        id->oa_base = job->oa_base;
                        id->oa_size = job->oa_size;
                        amdgpu_ring_emit_gds_switch(ring, job->vm_id,
                                                    job->gds_base, job->gds_size,
                                                    job->gws_base, job->gws_size,
                                                    job->oa_base, job->oa_size);
                }

                if (ring->funcs->patch_cond_exec)
                        amdgpu_ring_patch_cond_exec(ring, patch_offset);

                /* the double SWITCH_BUFFER here *cannot* be skipped by COND_EXEC */
                if (ring->funcs->emit_switch_buffer) {
                        amdgpu_ring_emit_switch_buffer(ring);
                        amdgpu_ring_emit_switch_buffer(ring);
                }
        }
        return 0;
}

/**
 * amdgpu_vm_reset_id - reset VMID to zero
 *
 * @adev: amdgpu device structure
 * @vm_id: vmid number to use
 *
 * Reset saved GDS, GWS and OA to force switch on next flush.
 */
void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id)
{
        struct amdgpu_vm_id *id = &adev->vm_manager.ids[vm_id];

        id->gds_base = 0;
        id->gds_size = 0;
        id->gws_base = 0;
        id->gws_size = 0;
        id->oa_base = 0;
        id->oa_size = 0;
}

/**
 * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
 *
 * @vm: requested vm
 * @bo: requested buffer object
 *
 * Find @bo inside the requested vm.
 * Search inside the @bo's vm list for the requested vm
 * Returns the found bo_va or NULL if none is found
 *
 * Object has to be reserved!
 */
struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
                                       struct amdgpu_bo *bo)
{
        struct amdgpu_bo_va *bo_va;

        list_for_each_entry(bo_va, &bo->va, bo_list) {
                if (bo_va->vm == vm)
                        return bo_va;
        }
        return NULL;
}

/**
 * amdgpu_vm_do_set_ptes - helper to call the right asic function
 *
 * @params: see amdgpu_pte_update_params definition
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: hw access flags
 *
 * Traces the parameters and calls the right asic functions
 * to setup the page table using the DMA.
 */
static void amdgpu_vm_do_set_ptes(struct amdgpu_pte_update_params *params,
                                  uint64_t pe, uint64_t addr,
                                  unsigned count, uint32_t incr,
                                  uint64_t flags)
{
        trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags);

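        /* For just a few entries, writing them directly into the IB is
         * cheaper than emitting a separate set_pte_pde command.
         */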
        if (count < 3) {
                amdgpu_vm_write_pte(params->adev, params->ib, pe,
                                    addr | flags, count, incr);

        } else {
                amdgpu_vm_set_pte_pde(params->adev, params->ib, pe, addr,
                                      count, incr, flags);
        }
}

/**
 * amdgpu_vm_do_copy_ptes - copy the PTEs from the GART
 *
 * @params: see amdgpu_pte_update_params definition
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: hw access flags
 *
 * Traces the parameters and calls the DMA function to copy the PTEs.
 */
static void amdgpu_vm_do_copy_ptes(struct amdgpu_pte_update_params *params,
                                   uint64_t pe, uint64_t addr,
                                   unsigned count, uint32_t incr,
                                   uint64_t flags)
{
        uint64_t src = (params->src + (addr >> 12) * 8);

        trace_amdgpu_vm_copy_ptes(pe, src, count);

        amdgpu_vm_copy_pte(params->adev, params->ib, pe, src, count);
}

/**
 * amdgpu_vm_map_gart - Resolve gart mapping of addr
 *
 * @pages_addr: optional DMA address to use for lookup
 * @addr: the unmapped addr
 *
 * Look up the physical address of the page that the pte resolves
 * to and return the pointer for the page table entry.
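 * The sub-page offset is kept, so this also works when the CPU page size
 * is larger than the GPU page size (e.g. 64KB CPU pages vs. 4KB GPU pages).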
 */
static uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
{
        uint64_t result;

        /* page table offset */
        result = pages_addr[addr >> PAGE_SHIFT];

        /* in case cpu page size != gpu page size */
        result |= addr & (~PAGE_MASK);

        result &= 0xFFFFFFFFFFFFF000ULL;

        return result;
}

/**
 * amdgpu_vm_update_level - update a single level in the hierarchy
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @parent: parent directory
 * @level: level of @parent in the hierarchy
 *
 * Makes sure all entries in @parent are up to date.
 * Returns 0 for success, error for failure.
 */
static int amdgpu_vm_update_level(struct amdgpu_device *adev,
                                  struct amdgpu_vm *vm,
                                  struct amdgpu_vm_pt *parent,
                                  unsigned level)
{
        struct amdgpu_bo *shadow;
        struct amdgpu_ring *ring;
        uint64_t pd_addr, shadow_addr;
        uint32_t incr = amdgpu_vm_bo_size(adev, level + 1);
        uint64_t last_pde = ~0, last_pt = ~0, last_shadow = ~0;
        unsigned count = 0, pt_idx, ndw;
        struct amdgpu_job *job;
        struct amdgpu_pte_update_params params;
        struct dma_fence *fence = NULL;

        int r;

        if (!parent->entries)
                return 0;
        ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);

        /* padding, etc. */
        ndw = 64;

        /* assume the worst case */
        ndw += parent->last_entry_used * 6;

        pd_addr = amdgpu_bo_gpu_offset(parent->bo);

        shadow = parent->bo->shadow;
        if (shadow) {
                r = amdgpu_ttm_bind(&shadow->tbo, &shadow->tbo.mem);
                if (r)
                        return r;
                shadow_addr = amdgpu_bo_gpu_offset(shadow);
                ndw *= 2;
        } else {
                shadow_addr = 0;
        }

        r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
        if (r)
                return r;

        memset(&params, 0, sizeof(params));
        params.adev = adev;
        params.ib = &job->ibs[0];

        /* walk over the address space and update the directory */
        for (pt_idx = 0; pt_idx <= parent->last_entry_used; ++pt_idx) {
                struct amdgpu_bo *bo = parent->entries[pt_idx].bo;
                uint64_t pde, pt;

                if (bo == NULL)
                        continue;

                if (bo->shadow) {
                        struct amdgpu_bo *pt_shadow = bo->shadow;

                        r = amdgpu_ttm_bind(&pt_shadow->tbo,
                                            &pt_shadow->tbo.mem);
                        if (r)
                                return r;
                }

                pt = amdgpu_bo_gpu_offset(bo);
                if (parent->entries[pt_idx].addr == pt)
                        continue;

                parent->entries[pt_idx].addr = pt;

                pde = pd_addr + pt_idx * 8;
                if (((last_pde + 8 * count) != pde) ||
                    ((last_pt + incr * count) != pt) ||
                    (count == AMDGPU_VM_MAX_UPDATE_SIZE)) {

                        if (count) {
                                uint64_t pt_addr =
                                        amdgpu_vm_adjust_mc_addr(adev, last_pt);

                                if (shadow)
                                        amdgpu_vm_do_set_ptes(&params,
                                                              last_shadow,
                                                              pt_addr, count,
                                                              incr,
                                                              AMDGPU_PTE_VALID);

                                amdgpu_vm_do_set_ptes(&params, last_pde,
                                                      pt_addr, count, incr,
                                                      AMDGPU_PTE_VALID);
                        }

                        count = 1;
                        last_pde = pde;
                        last_shadow = shadow_addr + pt_idx * 8;
                        last_pt = pt;
                } else {
                        ++count;
                }
        }

        if (count) {
                uint64_t pt_addr = amdgpu_vm_adjust_mc_addr(adev, last_pt);

                if (vm->root.bo->shadow)
                        amdgpu_vm_do_set_ptes(&params, last_shadow, pt_addr,
                                              count, incr, AMDGPU_PTE_VALID);

                amdgpu_vm_do_set_ptes(&params, last_pde, pt_addr,
                                      count, incr, AMDGPU_PTE_VALID);
        }

        if (params.ib->length_dw == 0) {
                amdgpu_job_free(job);
        } else {
                amdgpu_ring_pad_ib(ring, params.ib);
                amdgpu_sync_resv(adev, &job->sync, parent->bo->tbo.resv,
                                 AMDGPU_FENCE_OWNER_VM);
                if (shadow)
                        amdgpu_sync_resv(adev, &job->sync, shadow->tbo.resv,
                                         AMDGPU_FENCE_OWNER_VM);

                WARN_ON(params.ib->length_dw > ndw);
                r = amdgpu_job_submit(job, ring, &vm->entity,
                                      AMDGPU_FENCE_OWNER_VM, &fence);
                if (r)
                        goto error_free;

                amdgpu_bo_fence(parent->bo, fence, true);
                dma_fence_put(vm->last_dir_update);
                vm->last_dir_update = dma_fence_get(fence);
                dma_fence_put(fence);
        }
        /*
         * Recurse into the subdirectories. This recursion is harmless because
         * we only have a maximum of 5 layers.
         */
        for (pt_idx = 0; pt_idx <= parent->last_entry_used; ++pt_idx) {
                struct amdgpu_vm_pt *entry = &parent->entries[pt_idx];

                if (!entry->bo)
                        continue;

                r = amdgpu_vm_update_level(adev, vm, entry, level + 1);
                if (r)
                        return r;
        }

        return 0;

error_free:
        amdgpu_job_free(job);
        return r;
}

/**
 * amdgpu_vm_update_directories - make sure that all directories are valid
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 *
 * Makes sure all directories are up to date.
 * Returns 0 for success, error for failure.
 */
int amdgpu_vm_update_directories(struct amdgpu_device *adev,
                                 struct amdgpu_vm *vm)
{
        return amdgpu_vm_update_level(adev, vm, &vm->root, 0);
}

/**
 * amdgpu_vm_get_pt - find the page table for an address
 *
 * @p: see amdgpu_pte_update_params definition
 * @addr: virtual address in question
 *
 * Find the page table BO for a virtual address, return NULL when none found.
 */
static struct amdgpu_bo *amdgpu_vm_get_pt(struct amdgpu_pte_update_params *p,
                                          uint64_t addr)
{
        struct amdgpu_vm_pt *entry = &p->vm->root;
        unsigned idx, level = p->adev->vm_manager.num_level;

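        /* Walk down from the root, consuming amdgpu_vm_block_size bits of
         * the page address per level; if the walk stops before the leaf
         * level, the page table has not been allocated yet.
         */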
        while (entry->entries) {
                idx = addr >> (amdgpu_vm_block_size * level--);
                idx %= amdgpu_bo_size(entry->bo) / 8;
                entry = &entry->entries[idx];
        }

        if (level)
                return NULL;

        return entry->bo;
}

/**
 * amdgpu_vm_update_ptes - make sure that page tables are valid
 *
 * @params: see amdgpu_pte_update_params definition
 * @start: start of GPU address range
 * @end: end of GPU address range
 * @dst: destination address to map to, advanced for each page inside the
 *       function
 * @flags: mapping flags
 *
 * Update the page tables in the range @start - @end.
 */
static void amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
                                  uint64_t start, uint64_t end,
                                  uint64_t dst, uint64_t flags)
{
        const uint64_t mask = AMDGPU_VM_PTE_COUNT - 1;

        uint64_t cur_pe_start, cur_nptes, cur_dst;
        uint64_t addr; /* next GPU address to be updated */
        struct amdgpu_bo *pt;
        unsigned nptes; /* next number of ptes to be updated */
        uint64_t next_pe_start;

        /* initialize the variables */
        addr = start;
        pt = amdgpu_vm_get_pt(params, addr);
        if (!pt) {
                pr_err("PT not found, aborting update_ptes\n");
                return;
        }

        if (params->shadow) {
                if (!pt->shadow)
                        return;
                pt = pt->shadow;
        }
        if ((addr & ~mask) == (end & ~mask))
                nptes = end - addr;
        else
                nptes = AMDGPU_VM_PTE_COUNT - (addr & mask);

        cur_pe_start = amdgpu_bo_gpu_offset(pt);
        cur_pe_start += (addr & mask) * 8;
        cur_nptes = nptes;
        cur_dst = dst;

        /* for next ptb */
        addr += nptes;
        dst += nptes * AMDGPU_GPU_PAGE_SIZE;

        /* walk over the address space and update the page tables */
        while (addr < end) {
                pt = amdgpu_vm_get_pt(params, addr);
                if (!pt) {
                        pr_err("PT not found, aborting update_ptes\n");
                        return;
                }

                if (params->shadow) {
                        if (!pt->shadow)
                                return;
                        pt = pt->shadow;
                }

                if ((addr & ~mask) == (end & ~mask))
                        nptes = end - addr;
                else
                        nptes = AMDGPU_VM_PTE_COUNT - (addr & mask);

                next_pe_start = amdgpu_bo_gpu_offset(pt);
                next_pe_start += (addr & mask) * 8;

                if ((cur_pe_start + 8 * cur_nptes) == next_pe_start &&
                    ((cur_nptes + nptes) <= AMDGPU_VM_MAX_UPDATE_SIZE)) {
                        /* The next ptb is consecutive to the current one;
                         * don't call the update function yet, so that both
                         * ptbs can be updated together later.
                         */
                        cur_nptes += nptes;
                } else {
                        params->func(params, cur_pe_start, cur_dst, cur_nptes,
                                     AMDGPU_GPU_PAGE_SIZE, flags);

                        cur_pe_start = next_pe_start;
                        cur_nptes = nptes;
                        cur_dst = dst;
                }

                /* for next ptb */
                addr += nptes;
                dst += nptes * AMDGPU_GPU_PAGE_SIZE;
        }

        params->func(params, cur_pe_start, cur_dst, cur_nptes,
                     AMDGPU_GPU_PAGE_SIZE, flags);
}

/**
 * amdgpu_vm_frag_ptes - add fragment information to PTEs
 *
 * @params: see amdgpu_pte_update_params definition
 * @start: first PTE to handle
 * @end: last PTE to handle
 * @dst: addr those PTEs should point to
 * @flags: hw mapping flags
 */
static void amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params *params,
                                uint64_t start, uint64_t end,
                                uint64_t dst, uint64_t flags)
{
        /**
         * The MC L1 TLB supports variable sized pages, based on a fragment
         * field in the PTE. When this field is set to a non-zero value, page
         * granularity is increased from 4KB to (1 << (12 + frag)). The PTE
         * flags are considered valid for all PTEs within the fragment range
         * and corresponding mappings are assumed to be physically contiguous.
         *
         * The L1 TLB can store a single PTE for the whole fragment,
         * significantly increasing the space available for translation
         * caching. This leads to large improvements in throughput when the
         * TLB is under pressure.
         *
         * The L2 TLB distributes small and large fragments into two
         * asymmetric partitions. The large fragment cache is significantly
         * larger. Thus, we try to use large fragments wherever possible.
         * Userspace can support this by aligning virtual base address and
         * allocation size to the fragment size.
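         *
         * For example, a fragment value of 4 increases the page granularity
         * to 1 << (12 + 4) = 64KB, i.e. 16 contiguous 4KB pages form one
         * fragment, which is what the 64KB optimization below uses.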
         */

        /* SI and newer are optimized for 64KB */
        uint64_t frag_flags = AMDGPU_PTE_FRAG(AMDGPU_LOG2_PAGES_PER_FRAG);
        uint64_t frag_align = 1 << AMDGPU_LOG2_PAGES_PER_FRAG;

        uint64_t frag_start = ALIGN(start, frag_align);
        uint64_t frag_end = end & ~(frag_align - 1);

        /* system pages are non-contiguous */
        if (params->src || !(flags & AMDGPU_PTE_VALID) ||
            (frag_start >= frag_end)) {

                amdgpu_vm_update_ptes(params, start, end, dst, flags);
                return;
        }

        /* handle the 4K area at the beginning */
        if (start != frag_start) {
                amdgpu_vm_update_ptes(params, start, frag_start,
                                      dst, flags);
                dst += (frag_start - start) * AMDGPU_GPU_PAGE_SIZE;
        }

        /* handle the area in the middle */
        amdgpu_vm_update_ptes(params, frag_start, frag_end, dst,
                              flags | frag_flags);

        /* handle the 4K area at the end */
        if (frag_end != end) {
                dst += (frag_end - frag_start) * AMDGPU_GPU_PAGE_SIZE;
                amdgpu_vm_update_ptes(params, frag_end, end, dst, flags);
        }
}

/**
 * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
 *
 * @adev: amdgpu_device pointer
 * @exclusive: fence we need to sync to
 * @src: address where to copy page table entries from
 * @pages_addr: DMA addresses to use for mapping
 * @vm: requested vm
 * @start: start of mapped range
 * @last: last mapped entry
 * @flags: flags for the entries
 * @addr: addr to set the area to
 * @fence: optional resulting fence
 *
 * Fill in the page table entries between @start and @last.
 * Returns 0 for success, -EINVAL for failure.
 */
static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
                                       struct dma_fence *exclusive,
                                       uint64_t src,
                                       dma_addr_t *pages_addr,
                                       struct amdgpu_vm *vm,
                                       uint64_t start, uint64_t last,
                                       uint64_t flags, uint64_t addr,
                                       struct dma_fence **fence)
{
        struct amdgpu_ring *ring;
        void *owner = AMDGPU_FENCE_OWNER_VM;
        unsigned nptes, ncmds, ndw;
        struct amdgpu_job *job;
        struct amdgpu_pte_update_params params;
        struct dma_fence *f = NULL;
        int r;

        memset(&params, 0, sizeof(params));
        params.adev = adev;
        params.vm = vm;
        params.src = src;

        ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);

        /* sync to everything on unmapping */
        if (!(flags & AMDGPU_PTE_VALID))
                owner = AMDGPU_FENCE_OWNER_UNDEFINED;

        nptes = last - start + 1;

        /*
         * reserve space for one command every (1 << BLOCK_SIZE)
         *  entries or 2k dwords (whatever is smaller)
         */
        ncmds = (nptes >> min(amdgpu_vm_block_size, 11)) + 1;

        /* padding, etc. */
        ndw = 64;

        if (src) {
                /* only copy commands needed */
                ndw += ncmds * 7;

                params.func = amdgpu_vm_do_copy_ptes;

        } else if (pages_addr) {
                /* copy commands needed */
                ndw += ncmds * 7;

                /* and also PTEs */
                ndw += nptes * 2;

                params.func = amdgpu_vm_do_copy_ptes;

        } else {
                /* set page commands needed */
                ndw += ncmds * 10;

                /* two extra commands for begin/end of fragment */
                ndw += 2 * 10;

                params.func = amdgpu_vm_do_set_ptes;
        }

        r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
        if (r)
                return r;

        params.ib = &job->ibs[0];

        if (!src && pages_addr) {
                uint64_t *pte;
                unsigned i;

                /* Put the PTEs at the end of the IB. */
                i = ndw - nptes * 2;
                pte = (uint64_t *)&(job->ibs->ptr[i]);
                params.src = job->ibs->gpu_addr + i * 4;

                for (i = 0; i < nptes; ++i) {
                        pte[i] = amdgpu_vm_map_gart(pages_addr, addr + i *
                                                    AMDGPU_GPU_PAGE_SIZE);
                        pte[i] |= flags;
                }
                addr = 0;
        }

        r = amdgpu_sync_fence(adev, &job->sync, exclusive);
        if (r)
                goto error_free;

        r = amdgpu_sync_resv(adev, &job->sync, vm->root.bo->tbo.resv,
                             owner);
        if (r)
                goto error_free;

        r = reservation_object_reserve_shared(vm->root.bo->tbo.resv);
        if (r)
                goto error_free;

        params.shadow = true;
        amdgpu_vm_frag_ptes(&params, start, last + 1, addr, flags);
        params.shadow = false;
        amdgpu_vm_frag_ptes(&params, start, last + 1, addr, flags);

        amdgpu_ring_pad_ib(ring, params.ib);
        WARN_ON(params.ib->length_dw > ndw);
        r = amdgpu_job_submit(job, ring, &vm->entity,
                              AMDGPU_FENCE_OWNER_VM, &f);
        if (r)
                goto error_free;

        amdgpu_bo_fence(vm->root.bo, f, true);
        dma_fence_put(*fence);
        *fence = f;
        return 0;

error_free:
        amdgpu_job_free(job);
        return r;
}

/**
 * amdgpu_vm_bo_split_mapping - split a mapping into smaller chunks
 *
 * @adev: amdgpu_device pointer
 * @exclusive: fence we need to sync to
 * @gtt_flags: flags as they are used for GTT
 * @pages_addr: DMA addresses to use for mapping
 * @vm: requested vm
 * @mapping: mapped range and flags to use for the update
 * @flags: HW flags for the mapping
 * @nodes: array of drm_mm_nodes with the MC addresses
 * @fence: optional resulting fence
 *
 * Split the mapping into smaller chunks so that each update fits
 * into a SDMA IB.
 * Returns 0 for success, -EINVAL for failure.
 */
static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
                                      struct dma_fence *exclusive,
                                      uint64_t gtt_flags,
                                      dma_addr_t *pages_addr,
                                      struct amdgpu_vm *vm,
                                      struct amdgpu_bo_va_mapping *mapping,
                                      uint64_t flags,
                                      struct drm_mm_node *nodes,
                                      struct dma_fence **fence)
{
        uint64_t pfn, src = 0, start = mapping->it.start;
        int r;

        /* Normally, bo_va->flags only contains READABLE and WRITEABLE bits,
         * but just in case we filter the flags here first.
         */
        if (!(mapping->flags & AMDGPU_PTE_READABLE))
                flags &= ~AMDGPU_PTE_READABLE;
        if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
                flags &= ~AMDGPU_PTE_WRITEABLE;

        flags &= ~AMDGPU_PTE_EXECUTABLE;
        flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;

        flags &= ~AMDGPU_PTE_MTYPE_MASK;
        flags |= (mapping->flags & AMDGPU_PTE_MTYPE_MASK);

        trace_amdgpu_vm_bo_update(mapping);

        pfn = mapping->offset >> PAGE_SHIFT;
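        /* advance to the drm_mm node that contains the first page of the
         * mapping
         */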
        if (nodes) {
                while (pfn >= nodes->size) {
                        pfn -= nodes->size;
                        ++nodes;
                }
        }

        do {
                uint64_t max_entries;
                uint64_t addr, last;

                if (nodes) {
                        addr = nodes->start << PAGE_SHIFT;
                        max_entries = (nodes->size - pfn) *
                                (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
                } else {
                        addr = 0;
                        max_entries = S64_MAX;
                }

                if (pages_addr) {
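                        /* For system pages we can either copy the PTEs
                         * straight out of the GART table (when the flags
                         * match) or generate them in limited chunks.
                         */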
                        if (flags == gtt_flags)
                                src = adev->gart.table_addr +
                                        (addr >> AMDGPU_GPU_PAGE_SHIFT) * 8;
                        else
                                max_entries = min(max_entries, 16ull * 1024ull);
                        addr = 0;
                } else if (flags & AMDGPU_PTE_VALID) {
                        addr += adev->vm_manager.vram_base_offset;
                }
                addr += pfn << PAGE_SHIFT;

                last = min((uint64_t)mapping->it.last, start + max_entries - 1);
                r = amdgpu_vm_bo_update_mapping(adev, exclusive,
                                                src, pages_addr, vm,
                                                start, last, flags, addr,
                                                fence);
                if (r)
                        return r;

                pfn += last - start + 1;
                if (nodes && nodes->size == pfn) {
                        pfn = 0;
                        ++nodes;
                }
                start = last + 1;

        } while (unlikely(start != mapping->it.last + 1));

        return 0;
}

/**
 * amdgpu_vm_bo_update - update all BO mappings in the vm page table
 *
 * @adev: amdgpu_device pointer
 * @bo_va: requested BO and VM object
 * @clear: if true clear the entries
 *
 * Fill in the page table entries for @bo_va.
 * Returns 0 for success, -EINVAL for failure.
 */
int amdgpu_vm_bo_update(struct amdgpu_device *adev,
                        struct amdgpu_bo_va *bo_va,
                        bool clear)
{
        struct amdgpu_vm *vm = bo_va->vm;
        struct amdgpu_bo_va_mapping *mapping;
        dma_addr_t *pages_addr = NULL;
        uint64_t gtt_flags, flags;
        struct ttm_mem_reg *mem;
        struct drm_mm_node *nodes;
        struct dma_fence *exclusive;
        int r;

        if (clear || !bo_va->bo) {
                mem = NULL;
                nodes = NULL;
                exclusive = NULL;
        } else {
                struct ttm_dma_tt *ttm;

                mem = &bo_va->bo->tbo.mem;
                nodes = mem->mm_node;
                if (mem->mem_type == TTM_PL_TT) {
                        ttm = container_of(bo_va->bo->tbo.ttm, struct
                                           ttm_dma_tt, ttm);
                        pages_addr = ttm->dma_address;
                }
                exclusive = reservation_object_get_excl(bo_va->bo->tbo.resv);
        }

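        /* gtt_flags can only be used when the BO is bound through this
         * device's own GART and with identical flags; otherwise the PTEs
         * can't simply be copied from the GART table.
         */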
1416         if (bo_va->bo) {
1417                 flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem);
1418                 gtt_flags = (amdgpu_ttm_is_bound(bo_va->bo->tbo.ttm) &&
1419                         adev == amdgpu_ttm_adev(bo_va->bo->tbo.bdev)) ?
1420                         flags : 0;
1421         } else {
1422                 flags = 0x0;
1423                 gtt_flags = ~0x0;
1424         }
1425
1426         spin_lock(&vm->status_lock);
1427         if (!list_empty(&bo_va->vm_status))
1428                 list_splice_init(&bo_va->valids, &bo_va->invalids);
1429         spin_unlock(&vm->status_lock);
1430
1431         list_for_each_entry(mapping, &bo_va->invalids, list) {
1432                 r = amdgpu_vm_bo_split_mapping(adev, exclusive,
1433                                                gtt_flags, pages_addr, vm,
1434                                                mapping, flags, nodes,
1435                                                &bo_va->last_pt_update);
1436                 if (r)
1437                         return r;
1438         }
1439
1440         if (trace_amdgpu_vm_bo_mapping_enabled()) {
1441                 list_for_each_entry(mapping, &bo_va->valids, list)
1442                         trace_amdgpu_vm_bo_mapping(mapping);
1443
1444                 list_for_each_entry(mapping, &bo_va->invalids, list)
1445                         trace_amdgpu_vm_bo_mapping(mapping);
1446         }
1447
1448         spin_lock(&vm->status_lock);
1449         list_splice_init(&bo_va->invalids, &bo_va->valids);
1450         list_del_init(&bo_va->vm_status);
1451         if (clear)
1452                 list_add(&bo_va->vm_status, &vm->cleared);
1453         spin_unlock(&vm->status_lock);
1454
1455         return 0;
1456 }
1457
1458 /**
1459  * amdgpu_vm_update_prt_state - update the global PRT state
1460  */
1461 static void amdgpu_vm_update_prt_state(struct amdgpu_device *adev)
1462 {
1463         unsigned long flags;
1464         bool enable;
1465
1466         spin_lock_irqsave(&adev->vm_manager.prt_lock, flags);
1467         enable = !!atomic_read(&adev->vm_manager.num_prt_users);
1468         adev->gart.gart_funcs->set_prt(adev, enable);
1469         spin_unlock_irqrestore(&adev->vm_manager.prt_lock, flags);
1470 }
1471
1472 /**
1473  * amdgpu_vm_prt_get - add a PRT user
1474  */
1475 static void amdgpu_vm_prt_get(struct amdgpu_device *adev)
1476 {
1477         if (!adev->gart.gart_funcs->set_prt)
1478                 return;
1479
1480         if (atomic_inc_return(&adev->vm_manager.num_prt_users) == 1)
1481                 amdgpu_vm_update_prt_state(adev);
1482 }
1483
1484 /**
1485  * amdgpu_vm_prt_put - drop a PRT user
1486  */
1487 static void amdgpu_vm_prt_put(struct amdgpu_device *adev)
1488 {
1489         if (atomic_dec_return(&adev->vm_manager.num_prt_users) == 0)
1490                 amdgpu_vm_update_prt_state(adev);
1491 }
1492
1493 /**
1494  * amdgpu_vm_prt_cb - callback for updating the PRT status
1495  */
1496 static void amdgpu_vm_prt_cb(struct dma_fence *fence, struct dma_fence_cb *_cb)
1497 {
1498         struct amdgpu_prt_cb *cb = container_of(_cb, struct amdgpu_prt_cb, cb);
1499
1500         amdgpu_vm_prt_put(cb->adev);
1501         kfree(cb);
1502 }
1503
1504 /**
1505  * amdgpu_vm_add_prt_cb - add callback for updating the PRT status
1506  */
1507 static void amdgpu_vm_add_prt_cb(struct amdgpu_device *adev,
1508                                  struct dma_fence *fence)
1509 {
1510         struct amdgpu_prt_cb *cb;
1511
1512         if (!adev->gart.gart_funcs->set_prt)
1513                 return;
1514
1515         cb = kmalloc(sizeof(struct amdgpu_prt_cb), GFP_KERNEL);
1516         if (!cb) {
1517                 /* Last resort when we are OOM */
1518                 if (fence)
1519                         dma_fence_wait(fence, false);
1520
1521                 amdgpu_vm_prt_put(adev);
1522         } else {
1523                 cb->adev = adev;
1524                 if (!fence || dma_fence_add_callback(fence, &cb->cb,
1525                                                      amdgpu_vm_prt_cb))
1526                         amdgpu_vm_prt_cb(fence, &cb->cb);
1527         }
1528 }
1529
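/*
 * Note on the PRT bookkeeping above: every PRT mapping takes a reference
 * via amdgpu_vm_prt_get(), and amdgpu_vm_prt_cb() drops it once the unmap
 * fence signals, so the hardware PRT state is only switched off after the
 * last unmap has actually completed.
 */
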
1530 /**
1531  * amdgpu_vm_free_mapping - free a mapping
1532  *
1533  * @adev: amdgpu_device pointer
1534  * @vm: requested vm
1535  * @mapping: mapping to be freed
1536  * @fence: fence of the unmap operation
1537  *
1538  * Free a mapping and make sure we decrease the PRT usage count if applicable.
1539  */
1540 static void amdgpu_vm_free_mapping(struct amdgpu_device *adev,
1541                                    struct amdgpu_vm *vm,
1542                                    struct amdgpu_bo_va_mapping *mapping,
1543                                    struct dma_fence *fence)
1544 {
1545         if (mapping->flags & AMDGPU_PTE_PRT)
1546                 amdgpu_vm_add_prt_cb(adev, fence);
1547         kfree(mapping);
1548 }
1549
1550 /**
1551  * amdgpu_vm_prt_fini - finish all prt mappings
1552  *
1553  * @adev: amdgpu_device pointer
1554  * @vm: requested vm
1555  *
1556  * Register a cleanup callback to disable PRT support after the VM dies.
1557  */
1558 static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
1559 {
1560         struct reservation_object *resv = vm->root.bo->tbo.resv;
1561         struct dma_fence *excl, **shared;
1562         unsigned i, shared_count;
1563         int r;
1564
1565         r = reservation_object_get_fences_rcu(resv, &excl,
1566                                               &shared_count, &shared);
1567         if (r) {
1568                 /* Not enough memory to grab the fence list; as a last resort,
1569                  * block for all the fences to complete.
1570                  */
1571                 reservation_object_wait_timeout_rcu(resv, true, false,
1572                                                     MAX_SCHEDULE_TIMEOUT);
1573                 return;
1574         }
1575
1576         /* Add a callback for each fence in the reservation object */
1577         amdgpu_vm_prt_get(adev);
1578         amdgpu_vm_add_prt_cb(adev, excl);
1579
1580         for (i = 0; i < shared_count; ++i) {
1581                 amdgpu_vm_prt_get(adev);
1582                 amdgpu_vm_add_prt_cb(adev, shared[i]);
1583         }
1584
1585         kfree(shared);
1586 }
1587
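/*
 * Each fence found on the root reservation object gets its own PRT
 * reference and callback here, so teardown keeps PRT enabled until all
 * work that might still touch the VM has finished.
 */
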
1588 /**
1589  * amdgpu_vm_clear_freed - clear freed BOs in the PT
1590  *
1591  * @adev: amdgpu_device pointer
1592  * @vm: requested vm
1593  * @fence: optional resulting fence (unchanged if no work needed to be done
1594  * or if an error occurred)
1595  *
1596  * Make sure all freed BOs are cleared in the PT.
1597  * Returns 0 for success.
1598  *
1599  * PTs have to be reserved and mutex must be locked!
1600  */
1601 int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
1602                           struct amdgpu_vm *vm,
1603                           struct dma_fence **fence)
1604 {
1605         struct amdgpu_bo_va_mapping *mapping;
1606         struct dma_fence *f = NULL;
1607         int r;
1608
1609         while (!list_empty(&vm->freed)) {
1610                 mapping = list_first_entry(&vm->freed,
1611                         struct amdgpu_bo_va_mapping, list);
1612                 list_del(&mapping->list);
1613
1614                 r = amdgpu_vm_bo_split_mapping(adev, NULL, 0, NULL, vm, mapping,
1615                                                0, 0, &f);
1616                 amdgpu_vm_free_mapping(adev, vm, mapping, f);
1617                 if (r) {
1618                         dma_fence_put(f);
1619                         return r;
1620                 }
1621         }
1622
1623         if (fence && f) {
1624                 dma_fence_put(*fence);
1625                 *fence = f;
1626         } else {
1627                 dma_fence_put(f);
1628         }
1629
1630         return 0;
1632 }
1633
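/*
 * Illustrative helper, not from the original driver: flush the freed list
 * and feed the resulting fence into a caller-owned sync object.
 */
static inline int amdgpu_vm_flush_freed_example(struct amdgpu_device *adev,
                                                struct amdgpu_vm *vm,
                                                struct amdgpu_sync *sync)
{
        struct dma_fence *fence = NULL;
        int r;

        r = amdgpu_vm_clear_freed(adev, vm, &fence);
        if (r)
                return r;

        /* amdgpu_sync_fence() takes its own reference, so drop ours */
        r = amdgpu_sync_fence(adev, sync, fence);
        dma_fence_put(fence);
        return r;
}
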
1634 /**
1635  * amdgpu_vm_clear_invalids - clear invalidated BOs in the PT
1636  *
1637  * @adev: amdgpu_device pointer
1638  * @vm: requested vm
 * @sync: sync object to add the last page table update fence to
1639  *
1640  * Make sure all invalidated BOs are cleared in the PT.
1641  * Returns 0 for success.
1642  *
1643  * PTs have to be reserved and mutex must be locked!
1644  */
1645 int amdgpu_vm_clear_invalids(struct amdgpu_device *adev,
1646                              struct amdgpu_vm *vm, struct amdgpu_sync *sync)
1647 {
1648         struct amdgpu_bo_va *bo_va = NULL;
1649         int r = 0;
1650
1651         spin_lock(&vm->status_lock);
1652         while (!list_empty(&vm->invalidated)) {
1653                 bo_va = list_first_entry(&vm->invalidated,
1654                         struct amdgpu_bo_va, vm_status);
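                /* amdgpu_vm_bo_update() may sleep, so drop the spinlock */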
1655                 spin_unlock(&vm->status_lock);
1656
1657                 r = amdgpu_vm_bo_update(adev, bo_va, true);
1658                 if (r)
1659                         return r;
1660
1661                 spin_lock(&vm->status_lock);
1662         }
1663         spin_unlock(&vm->status_lock);
1664
1665         if (bo_va)
1666                 r = amdgpu_sync_fence(adev, sync, bo_va->last_pt_update);
1667
1668         return r;
1669 }
1670
1671 /**
1672  * amdgpu_vm_bo_add - add a bo to a specific vm
1673  *
1674  * @adev: amdgpu_device pointer
1675  * @vm: requested vm
1676  * @bo: amdgpu buffer object
1677  *
1678  * Add @bo into the requested vm and add it to the list of BOs
1679  * associated with that vm.
1680  * Returns newly added bo_va or NULL for failure
1681  *
1682  * Object has to be reserved!
1683  */
1684 struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
1685                                       struct amdgpu_vm *vm,
1686                                       struct amdgpu_bo *bo)
1687 {
1688         struct amdgpu_bo_va *bo_va;
1689
1690         bo_va = kzalloc(sizeof(struct amdgpu_bo_va), GFP_KERNEL);
1691         if (bo_va == NULL)
1692                 return NULL;
1694         bo_va->vm = vm;
1695         bo_va->bo = bo;
1696         bo_va->ref_count = 1;
1697         INIT_LIST_HEAD(&bo_va->bo_list);
1698         INIT_LIST_HEAD(&bo_va->valids);
1699         INIT_LIST_HEAD(&bo_va->invalids);
1700         INIT_LIST_HEAD(&bo_va->vm_status);
1701
1702         if (bo)
1703                 list_add_tail(&bo_va->bo_list, &bo->va);
1704
1705         return bo_va;
1706 }
1707
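/*
 * Hedged example of the usual add/map pairing; saddr and flags come from
 * the caller, and amdgpu_bo_size() is assumed to be GPU-page aligned:
 *
 *      bo_va = amdgpu_vm_bo_add(adev, vm, bo);
 *      if (!bo_va)
 *              return -ENOMEM;
 *      r = amdgpu_vm_bo_map(adev, bo_va, saddr, 0, amdgpu_bo_size(bo),
 *                           AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE);
 */
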
1708 /**
1709  * amdgpu_vm_bo_map - map bo inside a vm
1710  *
1711  * @adev: amdgpu_device pointer
1712  * @bo_va: bo_va to store the address
1713  * @saddr: where to map the BO
1714  * @offset: requested offset in the BO
 * @size: size in bytes of the mapping
1715  * @flags: attributes of pages (read/write/valid/etc.)
1716  *
1717  * Add a mapping of the BO at the specified addr into the VM.
1718  * Returns 0 for success, error for failure.
1719  *
1720  * Object has to be reserved and unreserved outside!
1721  */
1722 int amdgpu_vm_bo_map(struct amdgpu_device *adev,
1723                      struct amdgpu_bo_va *bo_va,
1724                      uint64_t saddr, uint64_t offset,
1725                      uint64_t size, uint64_t flags)
1726 {
1727         struct amdgpu_bo_va_mapping *mapping;
1728         struct amdgpu_vm *vm = bo_va->vm;
1729         struct interval_tree_node *it;
1730         uint64_t eaddr;
1731
1732         /* validate the parameters */
1733         if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
1734             size == 0 || size & AMDGPU_GPU_PAGE_MASK)
1735                 return -EINVAL;
1736
1737         /* make sure the object fits at this offset */
1738         eaddr = saddr + size - 1;
1739         if (saddr >= eaddr ||
1740             (bo_va->bo && offset + size > amdgpu_bo_size(bo_va->bo)))
1741                 return -EINVAL;
1742
1743         saddr /= AMDGPU_GPU_PAGE_SIZE;
1744         eaddr /= AMDGPU_GPU_PAGE_SIZE;
1745
1746         it = interval_tree_iter_first(&vm->va, saddr, eaddr);
1747         if (it) {
1748                 struct amdgpu_bo_va_mapping *tmp;
1749                 tmp = container_of(it, struct amdgpu_bo_va_mapping, it);
1750                 /* bo and tmp overlap, invalid addr */
1751                 dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
1752                         "0x%010lx-0x%010lx\n", bo_va->bo, saddr, eaddr,
1753                         tmp->it.start, tmp->it.last + 1);
1754                 return -EINVAL;
1755         }
1756
1757         mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
1758         if (!mapping)
1759                 return -ENOMEM;
1760
1761         INIT_LIST_HEAD(&mapping->list);
1762         mapping->it.start = saddr;
1763         mapping->it.last = eaddr;
1764         mapping->offset = offset;
1765         mapping->flags = flags;
1766
1767         list_add(&mapping->list, &bo_va->invalids);
1768         interval_tree_insert(&mapping->it, &vm->va);
1769
1770         if (flags & AMDGPU_PTE_PRT)
1771                 amdgpu_vm_prt_get(adev);
1772
1773         return 0;
1774 }
1775
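/*
 * Worked example: with a 4KB AMDGPU_GPU_PAGE_SIZE, a mapping of size
 * 0x100000 at saddr 0x100000 covers GPU page numbers 0x100 through 0x1ff,
 * since eaddr = (saddr + size - 1) / page_size is inclusive.
 */
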
1776 /**
1777  * amdgpu_vm_bo_replace_map - map bo inside a vm, replacing existing mappings
1778  *
1779  * @adev: amdgpu_device pointer
1780  * @bo_va: bo_va to store the address
1781  * @saddr: where to map the BO
1782  * @offset: requested offset in the BO
 * @size: size in bytes of the mapping
1783  * @flags: attributes of pages (read/write/valid/etc.)
1784  *
1785  * Add a mapping of the BO at the specified addr into the VM. Replace existing
1786  * mappings as we do so.
1787  * Returns 0 for success, error for failure.
1788  *
1789  * Object has to be reserved and unreserved outside!
1790  */
1791 int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
1792                              struct amdgpu_bo_va *bo_va,
1793                              uint64_t saddr, uint64_t offset,
1794                              uint64_t size, uint64_t flags)
1795 {
1796         struct amdgpu_bo_va_mapping *mapping;
1797         struct amdgpu_vm *vm = bo_va->vm;
1798         uint64_t eaddr;
1799         int r;
1800
1801         /* validate the parameters */
1802         if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
1803             size == 0 || size & AMDGPU_GPU_PAGE_MASK)
1804                 return -EINVAL;
1805
1806         /* make sure the object fits at this offset */
1807         eaddr = saddr + size - 1;
1808         if (saddr >= eaddr ||
1809             (bo_va->bo && offset + size > amdgpu_bo_size(bo_va->bo)))
1810                 return -EINVAL;
1811
1812         /* Allocate all the needed memory */
1813         mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
1814         if (!mapping)
1815                 return -ENOMEM;
1816
1817         r = amdgpu_vm_bo_clear_mappings(adev, bo_va->vm, saddr, size);
1818         if (r) {
1819                 kfree(mapping);
1820                 return r;
1821         }
1822
1823         saddr /= AMDGPU_GPU_PAGE_SIZE;
1824         eaddr /= AMDGPU_GPU_PAGE_SIZE;
1825
1826         mapping->it.start = saddr;
1827         mapping->it.last = eaddr;
1828         mapping->offset = offset;
1829         mapping->flags = flags;
1830
1831         list_add(&mapping->list, &bo_va->invalids);
1832         interval_tree_insert(&mapping->it, &vm->va);
1833
1834         if (flags & AMDGPU_PTE_PRT)
1835                 amdgpu_vm_prt_get(adev);
1836
1837         return 0;
1838 }
1839
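/*
 * Design note: the replacement mapping is allocated before the range is
 * cleared, so an -ENOMEM here cannot leave the range torn down with
 * nothing to put in its place.
 */
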
1840 /**
1841  * amdgpu_vm_bo_unmap - remove bo mapping from vm
1842  *
1843  * @adev: amdgpu_device pointer
1844  * @bo_va: bo_va to remove the address from
1845  * @saddr: where the BO is mapped
1846  *
1847  * Remove a mapping of the BO at the specified addr from the VM.
1848  * Returns 0 for success, error for failure.
1849  *
1850  * Object has to be reserved and unreserved outside!
1851  */
1852 int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
1853                        struct amdgpu_bo_va *bo_va,
1854                        uint64_t saddr)
1855 {
1856         struct amdgpu_bo_va_mapping *mapping;
1857         struct amdgpu_vm *vm = bo_va->vm;
1858         bool valid = true;
1859
1860         saddr /= AMDGPU_GPU_PAGE_SIZE;
1861
1862         list_for_each_entry(mapping, &bo_va->valids, list) {
1863                 if (mapping->it.start == saddr)
1864                         break;
1865         }
1866
1867         if (&mapping->list == &bo_va->valids) {
1868                 valid = false;
1869
1870                 list_for_each_entry(mapping, &bo_va->invalids, list) {
1871                         if (mapping->it.start == saddr)
1872                                 break;
1873                 }
1874
1875                 if (&mapping->list == &bo_va->invalids)
1876                         return -ENOENT;
1877         }
1878
1879         list_del(&mapping->list);
1880         interval_tree_remove(&mapping->it, &vm->va);
1881         trace_amdgpu_vm_bo_unmap(bo_va, mapping);
1882
1883         if (valid)
1884                 list_add(&mapping->list, &vm->freed);
1885         else
1886                 amdgpu_vm_free_mapping(adev, vm, mapping,
1887                                        bo_va->last_pt_update);
1888
1889         return 0;
1890 }
1891
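/*
 * Note: a mapping found on the valids list is already present in the page
 * tables and is therefore queued on vm->freed for a later clear, while an
 * invalid one was never committed and can be freed right away.
 */
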
1892 /**
1893  * amdgpu_vm_bo_clear_mappings - remove all mappings in a specific range
1894  *
1895  * @adev: amdgpu_device pointer
1896  * @vm: VM structure to use
1897  * @saddr: start of the range
1898  * @size: size of the range
1899  *
1900  * Remove all mappings in a range, split them as appropriate.
1901  * Returns 0 for success, error for failure.
1902  */
1903 int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
1904                                 struct amdgpu_vm *vm,
1905                                 uint64_t saddr, uint64_t size)
1906 {
1907         struct amdgpu_bo_va_mapping *before, *after, *tmp, *next;
1908         struct interval_tree_node *it;
1909         LIST_HEAD(removed);
1910         uint64_t eaddr;
1911
1912         eaddr = saddr + size - 1;
1913         saddr /= AMDGPU_GPU_PAGE_SIZE;
1914         eaddr /= AMDGPU_GPU_PAGE_SIZE;
1915
1916         /* Allocate all the needed memory */
1917         before = kzalloc(sizeof(*before), GFP_KERNEL);
1918         if (!before)
1919                 return -ENOMEM;
1920         INIT_LIST_HEAD(&before->list);
1921
1922         after = kzalloc(sizeof(*after), GFP_KERNEL);
1923         if (!after) {
1924                 kfree(before);
1925                 return -ENOMEM;
1926         }
1927         INIT_LIST_HEAD(&after->list);
1928
1929         /* Now gather all removed mappings */
1930         it = interval_tree_iter_first(&vm->va, saddr, eaddr);
1931         while (it) {
1932                 tmp = container_of(it, struct amdgpu_bo_va_mapping, it);
1933                 it = interval_tree_iter_next(it, saddr, eaddr);
1934
1935                 /* Remember mapping split at the start */
1936                 if (tmp->it.start < saddr) {
1937                         before->it.start = tmp->it.start;
1938                         before->it.last = saddr - 1;
1939                         before->offset = tmp->offset;
1940                         before->flags = tmp->flags;
1941                         list_add(&before->list, &tmp->list);
1942                 }
1943
1944                 /* Remember mapping split at the end */
1945                 if (tmp->it.last > eaddr) {
1946                         after->it.start = eaddr + 1;
1947                         after->it.last = tmp->it.last;
1948                         after->offset = tmp->offset;
1949                         after->offset += after->it.start - tmp->it.start;
1950                         after->flags = tmp->flags;
1951                         list_add(&after->list, &tmp->list);
1952                 }
1953
1954                 list_del(&tmp->list);
1955                 list_add(&tmp->list, &removed);
1956         }
1957
1958         /* And free them up */
1959         list_for_each_entry_safe(tmp, next, &removed, list) {
1960                 interval_tree_remove(&tmp->it, &vm->va);
1961                 list_del(&tmp->list);
1962
1963                 if (tmp->it.start < saddr)
1964                         tmp->it.start = saddr;
1965                 if (tmp->it.last > eaddr)
1966                         tmp->it.last = eaddr;
1967
1968                 list_add(&tmp->list, &vm->freed);
1969                 trace_amdgpu_vm_bo_unmap(NULL, tmp);
1970         }
1971
1972         /* Insert partial mapping before the range */
1973         if (!list_empty(&before->list)) {
1974                 interval_tree_insert(&before->it, &vm->va);
1975                 if (before->flags & AMDGPU_PTE_PRT)
1976                         amdgpu_vm_prt_get(adev);
1977         } else {
1978                 kfree(before);
1979         }
1980
1981         /* Insert partial mapping after the range */
1982         if (!list_empty(&after->list)) {
1983                 interval_tree_insert(&after->it, &vm->va);
1984                 if (after->flags & AMDGPU_PTE_PRT)
1985                         amdgpu_vm_prt_get(adev);
1986         } else {
1987                 kfree(after);
1988         }
1989
1990         return 0;
1991 }
1992
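/*
 * Worked example: clearing pages [0x100, 0x17f] out of a mapping covering
 * [0x000, 0x1ff] leaves a "before" remainder [0x000, 0x0ff] and an "after"
 * remainder [0x180, 0x1ff], with after->offset advanced so it still points
 * at the matching part of the BO.
 */
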
1993 /**
1994  * amdgpu_vm_bo_rmv - remove a bo from a specific vm
1995  *
1996  * @adev: amdgpu_device pointer
1997  * @bo_va: requested bo_va
1998  *
1999  * Remove @bo_va->bo from the requested vm.
2000  *
2001  * Object has to be reserved!
2002  */
2003 void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
2004                       struct amdgpu_bo_va *bo_va)
2005 {
2006         struct amdgpu_bo_va_mapping *mapping, *next;
2007         struct amdgpu_vm *vm = bo_va->vm;
2008
2009         list_del(&bo_va->bo_list);
2010
2011         spin_lock(&vm->status_lock);
2012         list_del(&bo_va->vm_status);
2013         spin_unlock(&vm->status_lock);
2014
2015         list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
2016                 list_del(&mapping->list);
2017                 interval_tree_remove(&mapping->it, &vm->va);
2018                 trace_amdgpu_vm_bo_unmap(bo_va, mapping);
2019                 list_add(&mapping->list, &vm->freed);
2020         }
2021         list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
2022                 list_del(&mapping->list);
2023                 interval_tree_remove(&mapping->it, &vm->va);
2024                 amdgpu_vm_free_mapping(adev, vm, mapping,
2025                                        bo_va->last_pt_update);
2026         }
2027
2028         dma_fence_put(bo_va->last_pt_update);
2029         kfree(bo_va);
2030 }
2031
2032 /**
2033  * amdgpu_vm_bo_invalidate - mark the bo as invalid
2034  *
2035  * @adev: amdgpu_device pointer
2037  * @bo: amdgpu buffer object
2038  *
2039  * Mark @bo as invalid.
2040  */
2041 void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
2042                              struct amdgpu_bo *bo)
2043 {
2044         struct amdgpu_bo_va *bo_va;
2045
2046         list_for_each_entry(bo_va, &bo->va, bo_list) {
2047                 spin_lock(&bo_va->vm->status_lock);
2048                 if (list_empty(&bo_va->vm_status))
2049                         list_add(&bo_va->vm_status, &bo_va->vm->invalidated);
2050                 spin_unlock(&bo_va->vm->status_lock);
2051         }
2052 }
2053
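/*
 * BOs marked invalid here are picked up again by
 * amdgpu_vm_clear_invalids(), which re-runs amdgpu_vm_bo_update() for
 * every entry on the invalidated list.
 */
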
2054 /**
2055  * amdgpu_vm_init - initialize a vm instance
2056  *
2057  * @adev: amdgpu_device pointer
2058  * @vm: requested vm
2059  *
2060  * Init @vm fields.
 * Returns 0 for success, error for failure.
2061  */
2062 int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
2063 {
2064         const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE,
2065                 AMDGPU_VM_PTE_COUNT * 8);
2066         unsigned ring_instance;
2067         struct amdgpu_ring *ring;
2068         struct amd_sched_rq *rq;
2069         int i, r;
2070
2071         for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
2072                 vm->ids[i] = NULL;
2073         vm->va = RB_ROOT;
2074         vm->client_id = atomic64_inc_return(&adev->vm_manager.client_counter);
2075         spin_lock_init(&vm->status_lock);
2076         INIT_LIST_HEAD(&vm->invalidated);
2077         INIT_LIST_HEAD(&vm->cleared);
2078         INIT_LIST_HEAD(&vm->freed);
2079
2080         /* create scheduler entity for page table updates */
2081
2082         ring_instance = atomic_inc_return(&adev->vm_manager.vm_pte_next_ring);
2083         ring_instance %= adev->vm_manager.vm_pte_num_rings;
2084         ring = adev->vm_manager.vm_pte_rings[ring_instance];
2085         rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_KERNEL];
2086         r = amd_sched_entity_init(&ring->sched, &vm->entity,
2087                                   rq, amdgpu_sched_jobs);
2088         if (r)
2089                 return r;
2090
2091         vm->last_dir_update = NULL;
2092
2093         r = amdgpu_bo_create(adev, amdgpu_vm_bo_size(adev, 0), align, true,
2094                              AMDGPU_GEM_DOMAIN_VRAM,
2095                              AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
2096                              AMDGPU_GEM_CREATE_SHADOW |
2097                              AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
2098                              AMDGPU_GEM_CREATE_VRAM_CLEARED,
2099                              NULL, NULL, &vm->root.bo);
2100         if (r)
2101                 goto error_free_sched_entity;
2102
2103         r = amdgpu_bo_reserve(vm->root.bo, false);
2104         if (r)
2105                 goto error_free_root;
2106
2107         vm->last_eviction_counter = atomic64_read(&adev->num_evictions);
2108         amdgpu_bo_unreserve(vm->root.bo);
2109
2110         return 0;
2111
2112 error_free_root:
2113         amdgpu_bo_unref(&vm->root.bo->shadow);
2114         amdgpu_bo_unref(&vm->root.bo);
2115         vm->root.bo = NULL;
2116
2117 error_free_sched_entity:
2118         amd_sched_entity_fini(&ring->sched, &vm->entity);
2119
2120         return r;
2121 }
2122
2123 /**
2124  * amdgpu_vm_free_levels - free PD/PT levels
2125  *
2126  * @level: PD/PT starting level to free
2127  *
2128  * Free the page directory or page table level and all sub levels.
2129  */
2130 static void amdgpu_vm_free_levels(struct amdgpu_vm_pt *level)
2131 {
2132         unsigned i;
2133
2134         if (level->bo) {
2135                 amdgpu_bo_unref(&level->bo->shadow);
2136                 amdgpu_bo_unref(&level->bo);
2137         }
2138
2139         if (level->entries)
2140                 for (i = 0; i <= level->last_entry_used; i++)
2141                         amdgpu_vm_free_levels(&level->entries[i]);
2142
2143         drm_free_large(level->entries);
2144 }
2145
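/*
 * The recursion above releases the tree top-down: each level drops its own
 * BO (and shadow) and then frees every populated child before releasing
 * the entries array itself.
 */
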
2146 /**
2147  * amdgpu_vm_fini - tear down a vm instance
2148  *
2149  * @adev: amdgpu_device pointer
2150  * @vm: requested vm
2151  *
2152  * Tear down @vm.
2153  * Unbind the VM and remove all BOs from the vm bo list.
2154  */
2155 void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
2156 {
2157         struct amdgpu_bo_va_mapping *mapping, *tmp;
2158         bool prt_fini_needed = !!adev->gart.gart_funcs->set_prt;
2159
2160         amd_sched_entity_fini(vm->entity.sched, &vm->entity);
2161
2162         if (!RB_EMPTY_ROOT(&vm->va))
2163                 dev_err(adev->dev, "still active bo inside vm\n");
2165         rbtree_postorder_for_each_entry_safe(mapping, tmp, &vm->va, it.rb) {
2166                 list_del(&mapping->list);
2167                 interval_tree_remove(&mapping->it, &vm->va);
2168                 kfree(mapping);
2169         }
2170         list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
2171                 if (mapping->flags & AMDGPU_PTE_PRT && prt_fini_needed) {
2172                         amdgpu_vm_prt_fini(adev, vm);
2173                         prt_fini_needed = false;
2174                 }
2175
2176                 list_del(&mapping->list);
2177                 amdgpu_vm_free_mapping(adev, vm, mapping, NULL);
2178         }
2179
2180         amdgpu_vm_free_levels(&vm->root);
2181         dma_fence_put(vm->last_dir_update);
2182 }
2183
2184 /**
2185  * amdgpu_vm_manager_init - init the VM manager
2186  *
2187  * @adev: amdgpu_device pointer
2188  *
2189  * Initialize the VM manager structures
2190  */
2191 void amdgpu_vm_manager_init(struct amdgpu_device *adev)
2192 {
2193         unsigned i;
2194
2195         INIT_LIST_HEAD(&adev->vm_manager.ids_lru);
2196
2197         /* skip over VMID 0, since it is the system VM */
2198         for (i = 1; i < adev->vm_manager.num_ids; ++i) {
2199                 amdgpu_vm_reset_id(adev, i);
2200                 amdgpu_sync_create(&adev->vm_manager.ids[i].active);
2201                 list_add_tail(&adev->vm_manager.ids[i].list,
2202                               &adev->vm_manager.ids_lru);
2203         }
2204
2205         adev->vm_manager.fence_context =
2206                 dma_fence_context_alloc(AMDGPU_MAX_RINGS);
2207         for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
2208                 adev->vm_manager.seqno[i] = 0;
2209
2210         atomic_set(&adev->vm_manager.vm_pte_next_ring, 0);
2211         atomic64_set(&adev->vm_manager.client_counter, 0);
2212         spin_lock_init(&adev->vm_manager.prt_lock);
2213         atomic_set(&adev->vm_manager.num_prt_users, 0);
2214 }
2215
2216 /**
2217  * amdgpu_vm_manager_fini - cleanup VM manager
2218  *
2219  * @adev: amdgpu_device pointer
2220  *
2221  * Cleanup the VM manager and free resources.
2222  */
2223 void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
2224 {
2225         unsigned i;
2226
2227         for (i = 0; i < AMDGPU_NUM_VM; ++i) {
2228                 struct amdgpu_vm_id *id = &adev->vm_manager.ids[i];
2229
2230                 dma_fence_put(id->first);
2231                 amdgpu_sync_free(&id->active);
2232                 dma_fence_put(id->flushed_updates);
2233                 dma_fence_put(id->last_flush);
2234         }
2235 }