/*
 * Copyright 2008 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#include <linux/pagemap.h>
#include <linux/sync_file.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include <drm/drm_syncobj.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_gmc.h"
static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
				      struct drm_amdgpu_cs_chunk_fence *data,
	struct drm_gem_object *gobj;

	gobj = drm_gem_object_lookup(p->filp, data->handle);

	p->uf_entry.robj = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
	p->uf_entry.priority = 0;
	p->uf_entry.tv.bo = &p->uf_entry.robj->tbo;
	p->uf_entry.tv.shared = true;
	p->uf_entry.user_pages = NULL;

	size = amdgpu_bo_size(p->uf_entry.robj);
	if (size != PAGE_SIZE || (data->offset + 8) > size)

	*offset = data->offset;

	drm_gem_object_put_unlocked(gobj);

	if (amdgpu_ttm_tt_get_usermm(p->uf_entry.robj->tbo.ttm)) {
		amdgpu_bo_unref(&p->uf_entry.robj);
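	/*
	 * For illustration only (not part of the driver): the
	 * AMDGPU_CHUNK_ID_FENCE chunk handled here is a
	 * struct drm_amdgpu_cs_chunk_fence filled by userspace, e.g.
	 *
	 *	struct drm_amdgpu_cs_chunk_fence fence_data = {
	 *		.handle = uf_bo_handle,	// placeholder GEM handle of a one page BO
	 *		.offset = 0,		// byte offset; offset + 8 must fit in the BO
	 *	};
	 *
	 * The checks above enforce exactly that: the BO must be PAGE_SIZE
	 * large and the 8 byte fence value must fit at the given offset.
	 */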
static int amdgpu_cs_bo_handles_chunk(struct amdgpu_cs_parser *p,
				      struct drm_amdgpu_bo_list_in *data)
	struct drm_amdgpu_bo_list_entry *info = NULL;

	r = amdgpu_bo_create_list_entry_array(data, &info);

	r = amdgpu_bo_list_create(p->adev, p->filp, info, data->bo_number,
static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs *cs)
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	uint64_t *chunk_array_user;
	uint64_t *chunk_array;
	unsigned size, num_ibs = 0;
	uint32_t uf_offset = 0;

	if (cs->in.num_chunks == 0)

	chunk_array = kmalloc_array(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL);

	p->ctx = amdgpu_ctx_get(fpriv, cs->in.ctx_id);

	/* skip guilty context job */
	if (atomic_read(&p->ctx->guilty) == 1) {

	mutex_lock(&p->ctx->lock);

	chunk_array_user = u64_to_user_ptr(cs->in.chunks);
	if (copy_from_user(chunk_array, chunk_array_user,
			   sizeof(uint64_t)*cs->in.num_chunks)) {

	p->nchunks = cs->in.num_chunks;
	p->chunks = kmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk),

	for (i = 0; i < p->nchunks; i++) {
		struct drm_amdgpu_cs_chunk __user **chunk_ptr = NULL;
		struct drm_amdgpu_cs_chunk user_chunk;
		uint32_t __user *cdata;

		chunk_ptr = u64_to_user_ptr(chunk_array[i]);
		if (copy_from_user(&user_chunk, chunk_ptr,
				   sizeof(struct drm_amdgpu_cs_chunk))) {
			goto free_partial_kdata;

		p->chunks[i].chunk_id = user_chunk.chunk_id;
		p->chunks[i].length_dw = user_chunk.length_dw;

		size = p->chunks[i].length_dw;
		cdata = u64_to_user_ptr(user_chunk.chunk_data);

		p->chunks[i].kdata = kvmalloc_array(size, sizeof(uint32_t), GFP_KERNEL);
		if (p->chunks[i].kdata == NULL) {
			goto free_partial_kdata;

		size *= sizeof(uint32_t);
		if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
			goto free_partial_kdata;

		switch (p->chunks[i].chunk_id) {
		case AMDGPU_CHUNK_ID_IB:

		case AMDGPU_CHUNK_ID_FENCE:
			size = sizeof(struct drm_amdgpu_cs_chunk_fence);
			if (p->chunks[i].length_dw * sizeof(uint32_t) < size) {
				goto free_partial_kdata;

			ret = amdgpu_cs_user_fence_chunk(p, p->chunks[i].kdata,
				goto free_partial_kdata;

		case AMDGPU_CHUNK_ID_BO_HANDLES:
			size = sizeof(struct drm_amdgpu_bo_list_in);
			if (p->chunks[i].length_dw * sizeof(uint32_t) < size) {
				goto free_partial_kdata;

			ret = amdgpu_cs_bo_handles_chunk(p, p->chunks[i].kdata);
				goto free_partial_kdata;

		case AMDGPU_CHUNK_ID_DEPENDENCIES:
		case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
		case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
			goto free_partial_kdata;

	ret = amdgpu_job_alloc(p->adev, num_ibs, &p->job, vm);

	if (p->ctx->vram_lost_counter != p->job->vram_lost_counter) {

	if (p->uf_entry.robj)
		p->job->uf_addr = uf_offset;

	/* Use this opportunity to fill in task info for the vm */
	amdgpu_vm_set_task_info(vm);

	kvfree(p->chunks[i].kdata);
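/*
 * For reference (illustrative only, not part of the driver): the layout this
 * parser walks is produced by userspace roughly as follows; "ib_va",
 * "ib_size_dw" and "ctx_id" below are placeholders.
 *
 *	struct drm_amdgpu_cs_chunk_ib ib_info = {
 *		.ip_type = AMDGPU_HW_IP_GFX,
 *		.va_start = ib_va,		// GPU VA of the indirect buffer
 *		.ib_bytes = ib_size_dw * 4,
 *	};
 *	struct drm_amdgpu_cs_chunk chunk = {
 *		.chunk_id = AMDGPU_CHUNK_ID_IB,
 *		.length_dw = sizeof(ib_info) / 4,
 *		.chunk_data = (uintptr_t)&ib_info,
 *	};
 *	uint64_t chunk_ptrs[] = { (uintptr_t)&chunk };
 *	union drm_amdgpu_cs cs = { 0 };
 *
 *	cs.in.ctx_id = ctx_id;
 *	cs.in.num_chunks = 1;
 *	cs.in.chunks = (uintptr_t)chunk_ptrs;
 *	// DRM_IOCTL_AMDGPU_CS then lands in amdgpu_cs_ioctl() below.
 */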
/* Convert microseconds to bytes. */
static u64 us_to_bytes(struct amdgpu_device *adev, s64 us)
	if (us <= 0 || !adev->mm_stats.log2_max_MBps)

	/* Since accum_us is incremented by a million per second, just
	 * multiply it by the number of MB/s to get the number of bytes.
	 */
	return us << adev->mm_stats.log2_max_MBps;

static s64 bytes_to_us(struct amdgpu_device *adev, u64 bytes)
	if (!adev->mm_stats.log2_max_MBps)

	return bytes >> adev->mm_stats.log2_max_MBps;
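/*
 * Worked example (for illustration only): 1 MB/s equals 1 byte/us, so
 * log2_max_MBps is simply log2 of the throttling bandwidth in MB/s.
 * With log2_max_MBps = 8 (256 MB/s), one second of accumulated time,
 * i.e. accum_us = 1000000, converts to 1000000 << 8 = 256000000 bytes
 * (~256 MB) of move budget, while reporting 1000000 moved bytes charges
 * 1000000 >> 8 = 3906 us back against the accumulated budget.
 */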
/* Returns how many bytes TTM can move right now. If no bytes can be moved,
 * it returns 0. If it returns non-zero, it's OK to move at least one buffer,
 * which means it can go over the threshold once. If that happens, the driver
 * will be in debt and no other buffer migrations can be done until that debt
 * is repaid.
 *
 * This approach allows moving a buffer of any size (it's important to allow
 * that).
 *
 * The currency is simply time in microseconds and it increases as the clock
 * ticks. The accumulated microseconds (us) are converted to bytes and
 * returned.
 */
static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,
	s64 time_us, increment_us;
	u64 free_vram, total_vram, used_vram;

	/* Allow a maximum of 200 accumulated ms. This is basically per-IB
	 * throttling.
	 *
	 * It means that in order to get full max MBps, at least 5 IBs per
	 * second must be submitted and not more than 200ms apart from each
	 * other.
	 */
	const s64 us_upper_bound = 200000;
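	/*
	 * Illustration (assuming 256 MB/s, i.e. log2_max_MBps = 8, as in the
	 * example above): the 200000 us cap limits a single submission to at
	 * most 200000 << 8 = 51200000 bytes (~51 MB) of buffer moves, no
	 * matter how long the application was idle beforehand.
	 */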
	if (!adev->mm_stats.log2_max_MBps) {

	total_vram = adev->gmc.real_vram_size - atomic64_read(&adev->vram_pin_size);
	used_vram = amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
	free_vram = used_vram >= total_vram ? 0 : total_vram - used_vram;

	spin_lock(&adev->mm_stats.lock);

	/* Increase the amount of accumulated us. */
	time_us = ktime_to_us(ktime_get());
	increment_us = time_us - adev->mm_stats.last_update_us;
	adev->mm_stats.last_update_us = time_us;
	adev->mm_stats.accum_us = min(adev->mm_stats.accum_us + increment_us,

	/* This prevents the short period of low performance when the VRAM
	 * usage is low and the driver is in debt or doesn't have enough
	 * accumulated us to fill VRAM quickly.
	 *
	 * The situation can occur in these cases:
	 * - a lot of VRAM is freed by userspace
	 * - the presence of a big buffer causes a lot of evictions
	 *   (solution: split buffers into smaller ones)
	 *
	 * If 128 MB or 1/8th of VRAM is free, start filling it now by setting
	 * accum_us to a positive number.
	 */
	if (free_vram >= 128 * 1024 * 1024 || free_vram >= total_vram / 8) {

		/* Be more aggressive on dGPUs. Try to fill a portion of free
		 * VRAM now.
		 */
		if (!(adev->flags & AMD_IS_APU))
			min_us = bytes_to_us(adev, free_vram / 4);
		else
			min_us = 0; /* Reset accum_us on APUs. */

		adev->mm_stats.accum_us = max(min_us, adev->mm_stats.accum_us);

	/* This is set to 0 if the driver is in debt to disallow (optional)
	 * buffer moves.
	 */
	*max_bytes = us_to_bytes(adev, adev->mm_stats.accum_us);

	/* Do the same for visible VRAM if half of it is free */
	if (!amdgpu_gmc_vram_full_visible(&adev->gmc)) {
		u64 total_vis_vram = adev->gmc.visible_vram_size;
		u64 used_vis_vram =
			amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);

		if (used_vis_vram < total_vis_vram) {
			u64 free_vis_vram = total_vis_vram - used_vis_vram;

			adev->mm_stats.accum_us_vis = min(adev->mm_stats.accum_us_vis +
							  increment_us, us_upper_bound);

			if (free_vis_vram >= total_vis_vram / 2)
				adev->mm_stats.accum_us_vis =
					max(bytes_to_us(adev, free_vis_vram / 2),
					    adev->mm_stats.accum_us_vis);

		*max_vis_bytes = us_to_bytes(adev, adev->mm_stats.accum_us_vis);

	spin_unlock(&adev->mm_stats.lock);

/* Report how many bytes have really been moved for the last command
 * submission. This can result in a debt that can stop buffer migrations
 * temporarily.
 */
void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
	spin_lock(&adev->mm_stats.lock);
	adev->mm_stats.accum_us -= bytes_to_us(adev, num_bytes);
	adev->mm_stats.accum_us_vis -= bytes_to_us(adev, num_vis_bytes);
	spin_unlock(&adev->mm_stats.lock);
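/*
 * Sketch of how the two halves above are meant to be paired (this mirrors
 * what amdgpu_cs_parser_bos() does further down; simplified, not a drop-in):
 *
 *	u64 threshold, vis_threshold, moved = 0, moved_vis = 0;
 *
 *	amdgpu_cs_get_threshold_for_moves(adev, &threshold, &vis_threshold);
 *	// ... validate BOs, adding ttm_operation_ctx.bytes_moved to
 *	// moved/moved_vis and comparing against the thresholds ...
 *	amdgpu_cs_report_moved_bytes(adev, moved, moved_vis);
 */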
static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
				 struct amdgpu_bo *bo)
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct ttm_operation_ctx ctx = {
		.interruptible = true,
		.no_wait_gpu = false,
		.resv = bo->tbo.resv,

	/* Don't move this buffer if we have depleted our allowance
	 * to move it. Don't move anything if the threshold is zero.
	 */
	if (p->bytes_moved < p->bytes_moved_threshold) {
		if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
		    (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)) {
			/* And don't move a CPU_ACCESS_REQUIRED BO to limited
			 * visible VRAM if we've depleted our allowance to do
			 * that.
			 */
			if (p->bytes_moved_vis < p->bytes_moved_vis_threshold)
				domain = bo->preferred_domains;
			else
				domain = bo->allowed_domains;
		} else {
			domain = bo->preferred_domains;
		}
	} else {
		domain = bo->allowed_domains;
	}

	amdgpu_bo_placement_from_domain(bo, domain);
	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);

	p->bytes_moved += ctx.bytes_moved;
	if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
	    amdgpu_bo_in_cpu_visible_vram(bo))
		p->bytes_moved_vis += ctx.bytes_moved;

	if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
		domain = bo->allowed_domains;
/* Last resort, try to evict something from the current working set */
static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
				struct amdgpu_bo *validated)
	uint32_t domain = validated->allowed_domains;
	struct ttm_operation_ctx ctx = { true, false };

	for (; &p->evictable->tv.head != &p->validated;
	     p->evictable = list_prev_entry(p->evictable, tv.head)) {

		struct amdgpu_bo_list_entry *candidate = p->evictable;
		struct amdgpu_bo *bo = candidate->robj;
		struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
		bool update_bytes_moved_vis;

		/* If we reached our current BO we can forget it */
		if (candidate->robj == validated)

		/* We can't move pinned BOs here */

		other = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);

		/* Check if this BO is in one of the domains we need space for */
		if (!(other & domain))

		/* Check if we can move this BO somewhere else */
		other = bo->allowed_domains & ~domain;

		/* Good we can try to move this BO somewhere else */
		update_bytes_moved_vis =
			!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
			amdgpu_bo_in_cpu_visible_vram(bo);
		amdgpu_bo_placement_from_domain(bo, other);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
		p->bytes_moved += ctx.bytes_moved;
		if (update_bytes_moved_vis)
			p->bytes_moved_vis += ctx.bytes_moved;

	p->evictable = list_prev_entry(p->evictable, tv.head);
	list_move(&candidate->tv.head, &p->validated);
static int amdgpu_cs_validate(void *param, struct amdgpu_bo *bo)
	struct amdgpu_cs_parser *p = param;

	do {
		r = amdgpu_cs_bo_validate(p, bo);
	} while (r == -ENOMEM && amdgpu_cs_try_evict(p, bo));

		r = amdgpu_cs_bo_validate(p, bo->shadow);
static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
				   struct list_head *validated)
	struct ttm_operation_ctx ctx = { true, false };
	struct amdgpu_bo_list_entry *lobj;

	list_for_each_entry(lobj, validated, tv.head) {
		struct amdgpu_bo *bo = lobj->robj;
		bool binding_userptr = false;
		struct mm_struct *usermm;

		usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm);
		if (usermm && usermm != current->mm)

		/* Check if we have user pages and nobody bound the BO already */
		if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm) &&
			amdgpu_bo_placement_from_domain(bo,
							AMDGPU_GEM_DOMAIN_CPU);
			r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
			amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm,
			binding_userptr = true;

		if (p->evictable == lobj)

		r = amdgpu_cs_validate(p, bo);

		if (binding_userptr) {
			kvfree(lobj->user_pages);
			lobj->user_pages = NULL;
static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
				union drm_amdgpu_cs *cs)
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_list_entry *e;
	struct list_head duplicates;
	struct amdgpu_bo *gds;
	struct amdgpu_bo *gws;
	struct amdgpu_bo *oa;

	INIT_LIST_HEAD(&p->validated);

	/* p->bo_list could already be assigned if AMDGPU_CHUNK_ID_BO_HANDLES is present */
	if (cs->in.bo_list_handle) {

		r = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle,

	} else if (!p->bo_list) {
		/* Create an empty bo_list when no handle is provided */
		r = amdgpu_bo_list_create(p->adev, p->filp, NULL, 0,

	amdgpu_bo_list_get_list(p->bo_list, &p->validated);
	if (p->bo_list->first_userptr != p->bo_list->num_entries)
		p->mn = amdgpu_mn_get(p->adev, AMDGPU_MN_TYPE_GFX);

	INIT_LIST_HEAD(&duplicates);
	amdgpu_vm_get_pd_bo(&fpriv->vm, &p->validated, &p->vm_pd);

	if (p->uf_entry.robj && !p->uf_entry.robj->parent)
		list_add(&p->uf_entry.tv.head, &p->validated);

		struct list_head need_pages;

		r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true,
		if (unlikely(r != 0)) {
			if (r != -ERESTARTSYS)
				DRM_ERROR("ttm_eu_reserve_buffers failed.\n");
			goto error_free_pages;

		INIT_LIST_HEAD(&need_pages);
		amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
			struct amdgpu_bo *bo = e->robj;

			if (amdgpu_ttm_tt_userptr_invalidated(bo->tbo.ttm,
				&e->user_invalidated) && e->user_pages) {

				/* We acquired a page array, but somebody
				 * invalidated it. Free it and try again
				 */
				release_pages(e->user_pages,
					      bo->tbo.ttm->num_pages);
				kvfree(e->user_pages);
				e->user_pages = NULL;

			if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm) &&
				list_del(&e->tv.head);
				list_add(&e->tv.head, &need_pages);

				amdgpu_bo_unreserve(e->robj);

		if (list_empty(&need_pages))

		/* Unreserve everything again. */
		ttm_eu_backoff_reservation(&p->ticket, &p->validated);

		/* We tried too many times, just abort */
			DRM_ERROR("deadlock in %s\n", __func__);
			goto error_free_pages;

		/* Fill the page arrays for all userptrs. */
		list_for_each_entry(e, &need_pages, tv.head) {
			struct ttm_tt *ttm = e->robj->tbo.ttm;

			e->user_pages = kvmalloc_array(ttm->num_pages,
						       sizeof(struct page *),
						       GFP_KERNEL | __GFP_ZERO);
			if (!e->user_pages) {
				DRM_ERROR("calloc failure in %s\n", __func__);
				goto error_free_pages;

			r = amdgpu_ttm_tt_get_user_pages(ttm, e->user_pages);
				DRM_ERROR("amdgpu_ttm_tt_get_user_pages failed.\n");
				kvfree(e->user_pages);
				e->user_pages = NULL;
				goto error_free_pages;

		list_splice(&need_pages, &p->validated);

	amdgpu_cs_get_threshold_for_moves(p->adev, &p->bytes_moved_threshold,
					  &p->bytes_moved_vis_threshold);
	p->bytes_moved_vis = 0;
	p->evictable = list_last_entry(&p->validated,
				       struct amdgpu_bo_list_entry,

	r = amdgpu_vm_validate_pt_bos(p->adev, &fpriv->vm,
				      amdgpu_cs_validate, p);
		DRM_ERROR("amdgpu_vm_validate_pt_bos() failed.\n");

	r = amdgpu_cs_list_validate(p, &duplicates);
		DRM_ERROR("amdgpu_cs_list_validate(duplicates) failed.\n");

	r = amdgpu_cs_list_validate(p, &p->validated);
		DRM_ERROR("amdgpu_cs_list_validate(validated) failed.\n");

	amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved,

	gds = p->bo_list->gds_obj;
	gws = p->bo_list->gws_obj;
	oa = p->bo_list->oa_obj;

	amdgpu_bo_list_for_each_entry(e, p->bo_list)
		e->bo_va = amdgpu_vm_bo_find(vm, e->robj);

		p->job->gds_base = amdgpu_bo_gpu_offset(gds);
		p->job->gds_size = amdgpu_bo_size(gds);

		p->job->gws_base = amdgpu_bo_gpu_offset(gws);
		p->job->gws_size = amdgpu_bo_size(gws);

		p->job->oa_base = amdgpu_bo_gpu_offset(oa);
		p->job->oa_size = amdgpu_bo_size(oa);

	if (!r && p->uf_entry.robj) {
		struct amdgpu_bo *uf = p->uf_entry.robj;

		r = amdgpu_ttm_alloc_gart(&uf->tbo);
		p->job->uf_addr += amdgpu_bo_gpu_offset(uf);

	ttm_eu_backoff_reservation(&p->ticket, &p->validated);

	amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {

		release_pages(e->user_pages,
			      e->robj->tbo.ttm->num_pages);
		kvfree(e->user_pages);
static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
	struct amdgpu_bo_list_entry *e;

	list_for_each_entry(e, &p->validated, tv.head) {
		struct reservation_object *resv = e->robj->tbo.resv;

		r = amdgpu_sync_resv(p->adev, &p->job->sync, resv, p->filp,
				     amdgpu_bo_explicit_sync(e->robj));
/**
 * amdgpu_cs_parser_fini() - clean parser states
 * @parser: parser structure holding parsing context.
 * @error: error number
 *
 * If error is set, then unvalidate the buffers, otherwise just free memory
 * used by parsing context.
 */
static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error,
				  bool backoff)
	if (error && backoff)
		ttm_eu_backoff_reservation(&parser->ticket,
					   &parser->validated);

	for (i = 0; i < parser->num_post_dep_syncobjs; i++)
		drm_syncobj_put(parser->post_dep_syncobjs[i]);
	kfree(parser->post_dep_syncobjs);

	dma_fence_put(parser->fence);

	mutex_unlock(&parser->ctx->lock);
	amdgpu_ctx_put(parser->ctx);

	amdgpu_bo_list_put(parser->bo_list);

	for (i = 0; i < parser->nchunks; i++)
		kvfree(parser->chunks[i].kdata);
	kfree(parser->chunks);

	amdgpu_job_free(parser->job);
	amdgpu_bo_unref(&parser->uf_entry.robj);
static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct amdgpu_device *adev = p->adev;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_list_entry *e;
	struct amdgpu_bo_va *bo_va;
	struct amdgpu_bo *bo;

	r = amdgpu_vm_clear_freed(adev, vm, NULL);

	r = amdgpu_vm_bo_update(adev, fpriv->prt_va, false);

	r = amdgpu_sync_fence(adev, &p->job->sync,
			      fpriv->prt_va->last_pt_update, false);

	if (amdgpu_sriov_vf(adev)) {

		bo_va = fpriv->csa_va;

		r = amdgpu_vm_bo_update(adev, bo_va, false);

		f = bo_va->last_pt_update;
		r = amdgpu_sync_fence(adev, &p->job->sync, f, false);

	amdgpu_bo_list_for_each_entry(e, p->bo_list) {

		/* ignore duplicates */

		r = amdgpu_vm_bo_update(adev, bo_va, false);

		f = bo_va->last_pt_update;
		r = amdgpu_sync_fence(adev, &p->job->sync, f, false);

	r = amdgpu_vm_handle_moved(adev, vm);

	r = amdgpu_vm_update_directories(adev, vm);

	r = amdgpu_sync_fence(adev, &p->job->sync, vm->last_update, false);

	if (amdgpu_vm_debug) {
		/* Invalidate all BOs to test for userspace bugs */
		amdgpu_bo_list_for_each_entry(e, p->bo_list) {
			/* ignore duplicates */

			amdgpu_vm_bo_invalidate(adev, e->robj, false);
static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
				 struct amdgpu_cs_parser *p)
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_ring *ring = p->ring;

	/* Only for UVD/VCE VM emulation */
	if (p->ring->funcs->parse_cs || p->ring->funcs->patch_cs_in_place) {

		for (i = 0, j = 0; i < p->nchunks && j < p->job->num_ibs; i++) {
			struct drm_amdgpu_cs_chunk_ib *chunk_ib;
			struct amdgpu_bo_va_mapping *m;
			struct amdgpu_bo *aobj = NULL;
			struct amdgpu_cs_chunk *chunk;
			uint64_t offset, va_start;
			struct amdgpu_ib *ib;

			chunk = &p->chunks[i];
			ib = &p->job->ibs[j];
			chunk_ib = chunk->kdata;

			if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB)

			va_start = chunk_ib->va_start & AMDGPU_VA_HOLE_MASK;
			r = amdgpu_cs_find_mapping(p, va_start, &aobj, &m);
				DRM_ERROR("IB va_start is invalid\n");

			if ((va_start + chunk_ib->ib_bytes) >
			    (m->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
				DRM_ERROR("IB va_start+ib_bytes is invalid\n");

			/* the IB should be reserved at this point */
			r = amdgpu_bo_kmap(aobj, (void **)&kptr);

			offset = m->start * AMDGPU_GPU_PAGE_SIZE;
			kptr += va_start - offset;

			if (p->ring->funcs->parse_cs) {
				memcpy(ib->ptr, kptr, chunk_ib->ib_bytes);
				amdgpu_bo_kunmap(aobj);

				r = amdgpu_ring_parse_cs(ring, p, j);

				ib->ptr = (uint32_t *)kptr;
				r = amdgpu_ring_patch_cs_in_place(ring, p, j);
				amdgpu_bo_kunmap(aobj);

	p->job->vm_pd_addr = amdgpu_bo_gpu_offset(vm->root.base.bo);

	r = amdgpu_bo_vm_update_pte(p);

	r = reservation_object_reserve_shared(vm->root.base.bo->tbo.resv);

	return amdgpu_cs_sync_rings(p);
static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
			     struct amdgpu_cs_parser *parser)
	struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	int r, ce_preempt = 0, de_preempt = 0;

	for (i = 0, j = 0; i < parser->nchunks && j < parser->job->num_ibs; i++) {
		struct amdgpu_cs_chunk *chunk;
		struct amdgpu_ib *ib;
		struct drm_amdgpu_cs_chunk_ib *chunk_ib;
		struct amdgpu_ring *ring;

		chunk = &parser->chunks[i];
		ib = &parser->job->ibs[j];
		chunk_ib = (struct drm_amdgpu_cs_chunk_ib *)chunk->kdata;

		if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB)

		if (chunk_ib->ip_type == AMDGPU_HW_IP_GFX && amdgpu_sriov_vf(adev)) {
			if (chunk_ib->flags & AMDGPU_IB_FLAG_PREEMPT) {
				if (chunk_ib->flags & AMDGPU_IB_FLAG_CE)

			/* each GFX command submit allows 0 or 1 IB preemptible for CE & DE */
			if (ce_preempt > 1 || de_preempt > 1)

		r = amdgpu_queue_mgr_map(adev, &parser->ctx->queue_mgr, chunk_ib->ip_type,
					 chunk_ib->ip_instance, chunk_ib->ring, &ring);

		if (chunk_ib->flags & AMDGPU_IB_FLAG_PREAMBLE) {
			parser->job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT;
			if (!parser->ctx->preamble_presented) {
				parser->job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT_FIRST;
				parser->ctx->preamble_presented = true;

		if (parser->ring && parser->ring != ring)

		parser->ring = ring;

		r = amdgpu_ib_get(adev, vm,
				  ring->funcs->parse_cs ? chunk_ib->ib_bytes : 0,
			DRM_ERROR("Failed to get ib !\n");

		ib->gpu_addr = chunk_ib->va_start;
		ib->length_dw = chunk_ib->ib_bytes / 4;
		ib->flags = chunk_ib->flags;

	/* UVD & VCE fw doesn't support user fences */
	if (parser->job->uf_addr && (
	    parser->ring->funcs->type == AMDGPU_RING_TYPE_UVD ||
	    parser->ring->funcs->type == AMDGPU_RING_TYPE_VCE))

	return amdgpu_ctx_wait_prev_fence(parser->ctx, parser->ring->idx);
static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p,
				       struct amdgpu_cs_chunk *chunk)
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct drm_amdgpu_cs_chunk_dep *deps;

	deps = (struct drm_amdgpu_cs_chunk_dep *)chunk->kdata;
	num_deps = chunk->length_dw * 4 /
		sizeof(struct drm_amdgpu_cs_chunk_dep);

	for (i = 0; i < num_deps; ++i) {
		struct amdgpu_ring *ring;
		struct amdgpu_ctx *ctx;
		struct dma_fence *fence;

		ctx = amdgpu_ctx_get(fpriv, deps[i].ctx_id);

		r = amdgpu_queue_mgr_map(p->adev, &ctx->queue_mgr,
					 deps[i].ip_instance,
					 deps[i].ring, &ring);
			amdgpu_ctx_put(ctx);

		fence = amdgpu_ctx_get_fence(ctx, ring,
		if (IS_ERR(fence)) {
			amdgpu_ctx_put(ctx);

		r = amdgpu_sync_fence(p->adev, &p->job->sync, fence,
		dma_fence_put(fence);
		amdgpu_ctx_put(ctx);

static int amdgpu_syncobj_lookup_and_add_to_sync(struct amdgpu_cs_parser *p,
	struct dma_fence *fence;

	r = drm_syncobj_find_fence(p->filp, handle, &fence);

	r = amdgpu_sync_fence(p->adev, &p->job->sync, fence, true);
	dma_fence_put(fence);

static int amdgpu_cs_process_syncobj_in_dep(struct amdgpu_cs_parser *p,
					    struct amdgpu_cs_chunk *chunk)
	struct drm_amdgpu_cs_chunk_sem *deps;

	deps = (struct drm_amdgpu_cs_chunk_sem *)chunk->kdata;
	num_deps = chunk->length_dw * 4 /
		sizeof(struct drm_amdgpu_cs_chunk_sem);

	for (i = 0; i < num_deps; ++i) {
		r = amdgpu_syncobj_lookup_and_add_to_sync(p, deps[i].handle);

static int amdgpu_cs_process_syncobj_out_dep(struct amdgpu_cs_parser *p,
					     struct amdgpu_cs_chunk *chunk)
	struct drm_amdgpu_cs_chunk_sem *deps;

	deps = (struct drm_amdgpu_cs_chunk_sem *)chunk->kdata;
	num_deps = chunk->length_dw * 4 /
		sizeof(struct drm_amdgpu_cs_chunk_sem);

	p->post_dep_syncobjs = kmalloc_array(num_deps,
					     sizeof(struct drm_syncobj *),
	p->num_post_dep_syncobjs = 0;

	if (!p->post_dep_syncobjs)

	for (i = 0; i < num_deps; ++i) {
		p->post_dep_syncobjs[i] = drm_syncobj_find(p->filp, deps[i].handle);
		if (!p->post_dep_syncobjs[i])

		p->num_post_dep_syncobjs++;
static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
				  struct amdgpu_cs_parser *p)
	for (i = 0; i < p->nchunks; ++i) {
		struct amdgpu_cs_chunk *chunk;

		chunk = &p->chunks[i];

		if (chunk->chunk_id == AMDGPU_CHUNK_ID_DEPENDENCIES) {
			r = amdgpu_cs_process_fence_dep(p, chunk);
		} else if (chunk->chunk_id == AMDGPU_CHUNK_ID_SYNCOBJ_IN) {
			r = amdgpu_cs_process_syncobj_in_dep(p, chunk);
		} else if (chunk->chunk_id == AMDGPU_CHUNK_ID_SYNCOBJ_OUT) {
			r = amdgpu_cs_process_syncobj_out_dep(p, chunk);

static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p)
	for (i = 0; i < p->num_post_dep_syncobjs; ++i)
		drm_syncobj_replace_fence(p->post_dep_syncobjs[i], p->fence);
static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
			    union drm_amdgpu_cs *cs)
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct amdgpu_ring *ring = p->ring;
	struct drm_sched_entity *entity = &p->ctx->rings[ring->idx].entity;
	enum drm_sched_priority priority;
	struct amdgpu_bo_list_entry *e;
	struct amdgpu_job *job;

	amdgpu_mn_lock(p->mn);
	amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
		struct amdgpu_bo *bo = e->robj;

		if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm)) {
			amdgpu_mn_unlock(p->mn);
			return -ERESTARTSYS;

	r = drm_sched_job_init(&job->base, entity, p->filp);
		amdgpu_job_free(job);
		amdgpu_mn_unlock(p->mn);

	job->owner = p->filp;
	p->fence = dma_fence_get(&job->base.s_fence->finished);

	r = amdgpu_ctx_add_fence(p->ctx, ring, p->fence, &seq);
		dma_fence_put(p->fence);
		dma_fence_put(&job->base.s_fence->finished);
		amdgpu_job_free(job);
		amdgpu_mn_unlock(p->mn);

	amdgpu_cs_post_dependencies(p);

	cs->out.handle = seq;
	job->uf_sequence = seq;

	amdgpu_job_free_resources(job);

	trace_amdgpu_cs_ioctl(job);
	amdgpu_vm_bo_trace_cs(&fpriv->vm, &p->ticket);
	priority = job->base.s_priority;
	drm_sched_entity_push_job(&job->base, entity);

	ring = to_amdgpu_ring(entity->rq->sched);
	amdgpu_ring_priority_get(ring, priority);

	ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence);
	amdgpu_mn_unlock(p->mn);
int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
	struct amdgpu_device *adev = dev->dev_private;
	union drm_amdgpu_cs *cs = data;
	struct amdgpu_cs_parser parser = {};
	bool reserved_buffers = false;

	if (!adev->accel_working)

	r = amdgpu_cs_parser_init(&parser, data);
		DRM_ERROR("Failed to initialize parser !\n");

	r = amdgpu_cs_ib_fill(adev, &parser);

	r = amdgpu_cs_parser_bos(&parser, data);
		DRM_ERROR("Not enough memory for command submission!\n");
	else if (r != -ERESTARTSYS)
		DRM_ERROR("Failed to process the buffer list %d!\n", r);

	reserved_buffers = true;

	r = amdgpu_cs_dependencies(adev, &parser);
		DRM_ERROR("Failed in the dependencies handling %d!\n", r);

	for (i = 0; i < parser.job->num_ibs; i++)
		trace_amdgpu_cs(&parser, i);

	r = amdgpu_cs_ib_vm_chunk(adev, &parser);

	r = amdgpu_cs_submit(&parser, cs);

	amdgpu_cs_parser_fini(&parser, r, reserved_buffers);
/**
 * amdgpu_cs_wait_ioctl - wait for a command submission to finish
 *
 * @dev: drm device
 * @data: data from userspace
 * @filp: file private
 *
 * Wait for the command submission identified by handle to finish.
 */
int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *filp)
	union drm_amdgpu_wait_cs *wait = data;
	struct amdgpu_device *adev = dev->dev_private;
	unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout);
	struct amdgpu_ring *ring = NULL;
	struct amdgpu_ctx *ctx;
	struct dma_fence *fence;

	ctx = amdgpu_ctx_get(filp->driver_priv, wait->in.ctx_id);

	r = amdgpu_queue_mgr_map(adev, &ctx->queue_mgr,
				 wait->in.ip_type, wait->in.ip_instance,
				 wait->in.ring, &ring);
		amdgpu_ctx_put(ctx);

	fence = amdgpu_ctx_get_fence(ctx, ring, wait->in.handle);

	r = dma_fence_wait_timeout(fence, true, timeout);
	if (r > 0 && fence->error)

	dma_fence_put(fence);

	amdgpu_ctx_put(ctx);

	memset(wait, 0, sizeof(*wait));
	wait->out.status = (r == 0);
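	/*
	 * Userspace side, for illustration only ("cs_handle", "ctx_id" and
	 * "timeout_ns" are placeholders):
	 *
	 *	union drm_amdgpu_wait_cs wait = { 0 };
	 *
	 *	wait.in.handle = cs_handle;	// cs->out.handle from the submission
	 *	wait.in.ctx_id = ctx_id;
	 *	wait.in.ip_type = AMDGPU_HW_IP_GFX;
	 *	wait.in.timeout = timeout_ns;
	 *	ioctl(drm_fd, DRM_IOCTL_AMDGPU_WAIT_CS, &wait);
	 *	// wait.out.status == 0 means the submission completed in time.
	 */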
/**
 * amdgpu_cs_get_fence - helper to get fence from drm_amdgpu_fence
 *
 * @adev: amdgpu device
 * @filp: file private
 * @user: drm_amdgpu_fence copied from user space
 */
static struct dma_fence *amdgpu_cs_get_fence(struct amdgpu_device *adev,
					     struct drm_file *filp,
					     struct drm_amdgpu_fence *user)
	struct amdgpu_ring *ring;
	struct amdgpu_ctx *ctx;
	struct dma_fence *fence;

	ctx = amdgpu_ctx_get(filp->driver_priv, user->ctx_id);
		return ERR_PTR(-EINVAL);

	r = amdgpu_queue_mgr_map(adev, &ctx->queue_mgr, user->ip_type,
				 user->ip_instance, user->ring, &ring);
		amdgpu_ctx_put(ctx);

	fence = amdgpu_ctx_get_fence(ctx, ring, user->seq_no);
	amdgpu_ctx_put(ctx);
int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *filp)
	struct amdgpu_device *adev = dev->dev_private;
	union drm_amdgpu_fence_to_handle *info = data;
	struct dma_fence *fence;
	struct drm_syncobj *syncobj;
	struct sync_file *sync_file;

	fence = amdgpu_cs_get_fence(adev, filp, &info->in.fence);
		return PTR_ERR(fence);

	switch (info->in.what) {
	case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ:
		r = drm_syncobj_create(&syncobj, 0, fence);
		dma_fence_put(fence);

		r = drm_syncobj_get_handle(filp, syncobj, &info->out.handle);
		drm_syncobj_put(syncobj);

	case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ_FD:
		r = drm_syncobj_create(&syncobj, 0, fence);
		dma_fence_put(fence);

		r = drm_syncobj_get_fd(syncobj, (int *)&info->out.handle);
		drm_syncobj_put(syncobj);

	case AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD:
		fd = get_unused_fd_flags(O_CLOEXEC);
			dma_fence_put(fence);

		sync_file = sync_file_create(fence);
		dma_fence_put(fence);

		fd_install(fd, sync_file->file);
		info->out.handle = fd;
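	/*
	 * Illustrative caller side (not part of the driver; "submit_ctx" and
	 * "seq" are placeholders): export a submission fence as a sync_file fd.
	 *
	 *	union drm_amdgpu_fence_to_handle fth = { 0 };
	 *
	 *	fth.in.fence.ctx_id = submit_ctx;
	 *	fth.in.fence.ip_type = AMDGPU_HW_IP_GFX;
	 *	fth.in.fence.seq_no = seq;	// cs->out.handle of the submission
	 *	fth.in.what = AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD;
	 *	ioctl(drm_fd, DRM_IOCTL_AMDGPU_FENCE_TO_HANDLE, &fth);
	 *	// fth.out.handle now holds the sync_file fd installed above.
	 */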
/**
 * amdgpu_cs_wait_all_fences - wait on all fences to signal
 *
 * @adev: amdgpu device
 * @filp: file private
 * @wait: wait parameters
 * @fences: array of drm_amdgpu_fence
 */
static int amdgpu_cs_wait_all_fences(struct amdgpu_device *adev,
				     struct drm_file *filp,
				     union drm_amdgpu_wait_fences *wait,
				     struct drm_amdgpu_fence *fences)
	uint32_t fence_count = wait->in.fence_count;

	for (i = 0; i < fence_count; i++) {
		struct dma_fence *fence;
		unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);

		fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
			return PTR_ERR(fence);

		r = dma_fence_wait_timeout(fence, true, timeout);
		dma_fence_put(fence);

			return fence->error;

	memset(wait, 0, sizeof(*wait));
	wait->out.status = (r > 0);
/**
 * amdgpu_cs_wait_any_fence - wait on any fence to signal
 *
 * @adev: amdgpu device
 * @filp: file private
 * @wait: wait parameters
 * @fences: array of drm_amdgpu_fence
 */
static int amdgpu_cs_wait_any_fence(struct amdgpu_device *adev,
				    struct drm_file *filp,
				    union drm_amdgpu_wait_fences *wait,
				    struct drm_amdgpu_fence *fences)
	unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);
	uint32_t fence_count = wait->in.fence_count;
	uint32_t first = ~0;
	struct dma_fence **array;

	/* Prepare the fence array */
	array = kcalloc(fence_count, sizeof(struct dma_fence *), GFP_KERNEL);

	for (i = 0; i < fence_count; i++) {
		struct dma_fence *fence;

		fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
		if (IS_ERR(fence)) {
			goto err_free_fence_array;

		} else { /* NULL, the fence has been already signaled */

	r = dma_fence_wait_any_timeout(array, fence_count, true, timeout,
		goto err_free_fence_array;

	memset(wait, 0, sizeof(*wait));
	wait->out.status = (r > 0);
	wait->out.first_signaled = first;

	if (first < fence_count && array[first])
		r = array[first]->error;

err_free_fence_array:
	for (i = 0; i < fence_count; i++)
		dma_fence_put(array[i]);
/**
 * amdgpu_cs_wait_fences_ioctl - wait for multiple command submissions to finish
 *
 * @dev: drm device
 * @data: data from userspace
 * @filp: file private
 */
int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
	struct amdgpu_device *adev = dev->dev_private;
	union drm_amdgpu_wait_fences *wait = data;
	uint32_t fence_count = wait->in.fence_count;
	struct drm_amdgpu_fence *fences_user;
	struct drm_amdgpu_fence *fences;

	/* Get the fences from userspace */
	fences = kmalloc_array(fence_count, sizeof(struct drm_amdgpu_fence),

	fences_user = u64_to_user_ptr(wait->in.fences);
	if (copy_from_user(fences, fences_user,
			   sizeof(struct drm_amdgpu_fence) * fence_count)) {
		goto err_free_fences;

	if (wait->in.wait_all)
		r = amdgpu_cs_wait_all_fences(adev, filp, wait, fences);
	else
		r = amdgpu_cs_wait_any_fence(adev, filp, wait, fences);
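	/*
	 * Illustrative caller side ("fence_array", "n_fences" and "timeout_ns"
	 * are placeholders):
	 *
	 *	union drm_amdgpu_wait_fences wf = { 0 };
	 *
	 *	wf.in.fences = (uintptr_t)fence_array;	// struct drm_amdgpu_fence[]
	 *	wf.in.fence_count = n_fences;
	 *	wf.in.wait_all = 1;			// 0 = return on the first signal
	 *	wf.in.timeout_ns = timeout_ns;
	 *	ioctl(drm_fd, DRM_IOCTL_AMDGPU_WAIT_FENCES, &wf);
	 *	// wf.out.status / wf.out.first_signaled are filled in by the kernel.
	 */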
/**
 * amdgpu_cs_find_mapping - find bo_va mapping for a VM address
 *
 * @parser: command submission parser context
 * @addr: VM address
 * @bo: resulting BO of the mapping found
 *
 * Search the buffer objects in the command submission context for a certain
 * virtual memory address. Returns allocation structure when found, NULL
 * otherwise.
 */
int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
			   uint64_t addr, struct amdgpu_bo **bo,
			   struct amdgpu_bo_va_mapping **map)
	struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
	struct ttm_operation_ctx ctx = { false, false };
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_va_mapping *mapping;

	addr /= AMDGPU_GPU_PAGE_SIZE;

	mapping = amdgpu_vm_bo_lookup_mapping(vm, addr);
	if (!mapping || !mapping->bo_va || !mapping->bo_va->base.bo)

	*bo = mapping->bo_va->base.bo;

	/* Double check that the BO is reserved by this CS */
	if (READ_ONCE((*bo)->tbo.resv->lock.ctx) != &parser->ticket)

	if (!((*bo)->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) {
		(*bo)->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
		amdgpu_bo_placement_from_domain(*bo, (*bo)->allowed_domains);
		r = ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, &ctx);

	return amdgpu_ttm_alloc_gart(&(*bo)->tbo);