/*
 * Copyright 2008 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/sync_file.h>
#include <linux/dma-buf.h>

#include <drm/amdgpu_drm.h>
#include <drm/drm_syncobj.h>
#include "amdgpu_cs.h"
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_gmc.h"
#include "amdgpu_gem.h"
#include "amdgpu_ras.h"
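
/* Map the user fence BO referenced by an AMDGPU_CHUNK_ID_FENCE chunk and
 * remember it in the parser. The BO must be exactly one page and the fence
 * offset must leave room for the 8 byte sequence number written on job
 * completion; userptr BOs are rejected.
 */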
static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
				      struct drm_amdgpu_cs_chunk_fence *data,
				      uint32_t *offset)
{
	struct drm_gem_object *gobj;
	struct amdgpu_bo *bo;
	unsigned long size;
	int r;

	gobj = drm_gem_object_lookup(p->filp, data->handle);
	if (gobj == NULL)
		return -EINVAL;

	bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
	p->uf_entry.priority = 0;
	p->uf_entry.tv.bo = &bo->tbo;
	/* One for TTM and two for the CS job */
	p->uf_entry.tv.num_shared = 3;

	drm_gem_object_put(gobj);

	size = amdgpu_bo_size(bo);
	if (size != PAGE_SIZE || (data->offset + 8) > size) {
		r = -EINVAL;
		goto error_unref;
	}

	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
		r = -EINVAL;
		goto error_unref;
	}

	*offset = data->offset;

	return 0;

error_unref:
	amdgpu_bo_unref(&bo);
	return r;
}
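
/* Build p->bo_list from the handle array passed in an
 * AMDGPU_CHUNK_ID_BO_HANDLES chunk, as an alternative to passing a
 * pre-created list via cs->in.bo_list_handle.
 */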
static int amdgpu_cs_bo_handles_chunk(struct amdgpu_cs_parser *p,
				      struct drm_amdgpu_bo_list_in *data)
{
	struct drm_amdgpu_bo_list_entry *info = NULL;
	int r;

	r = amdgpu_bo_create_list_entry_array(data, &info);
	if (r)
		return r;

	r = amdgpu_bo_list_create(p->adev, p->filp, info, data->bo_number,
				  &p->bo_list);
	if (r)
		goto error_free;

	kvfree(info);
	return 0;

error_free:
	kvfree(info);
	return r;
}
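
/* Copy the chunk array out of userspace and handle the chunks that must be
 * processed before IB parsing (user fence and BO handles). Sync and
 * dependency chunks are only sanity checked here; they are processed later
 * in amdgpu_cs_dependencies().
 */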
static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs *cs)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	uint64_t *chunk_array_user;
	uint64_t *chunk_array;
	unsigned size, num_ibs = 0;
	uint32_t uf_offset = 0;
	int i;
	int ret;

	if (cs->in.num_chunks == 0)
		return -EINVAL;

	chunk_array = kvmalloc_array(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL);
	if (!chunk_array)
		return -ENOMEM;

	p->ctx = amdgpu_ctx_get(fpriv, cs->in.ctx_id);
	if (!p->ctx) {
		ret = -EINVAL;
		goto free_chunk;
	}

	mutex_lock(&p->ctx->lock);

	/* skip guilty context job */
	if (atomic_read(&p->ctx->guilty) == 1) {
		ret = -ECANCELED;
		goto free_chunk;
	}

	/* get chunks */
	chunk_array_user = u64_to_user_ptr(cs->in.chunks);
	if (copy_from_user(chunk_array, chunk_array_user,
			   sizeof(uint64_t)*cs->in.num_chunks)) {
		ret = -EFAULT;
		goto free_chunk;
	}

	p->nchunks = cs->in.num_chunks;
	p->chunks = kvmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk),
				   GFP_KERNEL);
	if (!p->chunks) {
		ret = -ENOMEM;
		goto free_chunk;
	}

	for (i = 0; i < p->nchunks; i++) {
		struct drm_amdgpu_cs_chunk __user **chunk_ptr = NULL;
		struct drm_amdgpu_cs_chunk user_chunk;
		uint32_t __user *cdata;

		chunk_ptr = u64_to_user_ptr(chunk_array[i]);
		if (copy_from_user(&user_chunk, chunk_ptr,
				   sizeof(struct drm_amdgpu_cs_chunk))) {
			ret = -EFAULT;
			i--;
			goto free_partial_kdata;
		}
		p->chunks[i].chunk_id = user_chunk.chunk_id;
		p->chunks[i].length_dw = user_chunk.length_dw;

		size = p->chunks[i].length_dw;
		cdata = u64_to_user_ptr(user_chunk.chunk_data);

		p->chunks[i].kdata = kvmalloc_array(size, sizeof(uint32_t), GFP_KERNEL);
		if (p->chunks[i].kdata == NULL) {
			ret = -ENOMEM;
			i--;
			goto free_partial_kdata;
		}
		size *= sizeof(uint32_t);
		if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
			ret = -EFAULT;
			goto free_partial_kdata;
		}

		switch (p->chunks[i].chunk_id) {
		case AMDGPU_CHUNK_ID_IB:
			++num_ibs;
			break;

		case AMDGPU_CHUNK_ID_FENCE:
			size = sizeof(struct drm_amdgpu_cs_chunk_fence);
			if (p->chunks[i].length_dw * sizeof(uint32_t) < size) {
				ret = -EINVAL;
				goto free_partial_kdata;
			}

			ret = amdgpu_cs_user_fence_chunk(p, p->chunks[i].kdata,
							 &uf_offset);
			if (ret)
				goto free_partial_kdata;

			break;

		case AMDGPU_CHUNK_ID_BO_HANDLES:
			size = sizeof(struct drm_amdgpu_bo_list_in);
			if (p->chunks[i].length_dw * sizeof(uint32_t) < size) {
				ret = -EINVAL;
				goto free_partial_kdata;
			}

			ret = amdgpu_cs_bo_handles_chunk(p, p->chunks[i].kdata);
			if (ret)
				goto free_partial_kdata;

			break;

		case AMDGPU_CHUNK_ID_DEPENDENCIES:
		case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
		case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
		case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES:
		case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT:
		case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL:
			break;

		default:
			ret = -EINVAL;
			goto free_partial_kdata;
		}
	}

	ret = amdgpu_job_alloc(p->adev, num_ibs, &p->job, vm);
	if (ret)
		goto free_all_kdata;

	if (p->ctx->vram_lost_counter != p->job->vram_lost_counter) {
		ret = -ECANCELED;
		goto free_all_kdata;
	}

	if (p->uf_entry.tv.bo)
		p->job->uf_addr = uf_offset;
	kvfree(chunk_array);

	/* Use this opportunity to fill in task info for the vm */
	amdgpu_vm_set_task_info(vm);

	return 0;

free_all_kdata:
	i = p->nchunks - 1;
free_partial_kdata:
	for (; i >= 0; i--)
		kvfree(p->chunks[i].kdata);
	kvfree(p->chunks);
	p->chunks = NULL;
	p->nchunks = 0;
free_chunk:
	kvfree(chunk_array);

	return ret;
}
/* Convert microseconds to bytes. */
static u64 us_to_bytes(struct amdgpu_device *adev, s64 us)
{
	if (us <= 0 || !adev->mm_stats.log2_max_MBps)
		return 0;

	/* Since accum_us is incremented by a million per second, just
	 * multiply it by the number of MB/s to get the number of bytes.
	 */
	return us << adev->mm_stats.log2_max_MBps;
}

static s64 bytes_to_us(struct amdgpu_device *adev, u64 bytes)
{
	if (!adev->mm_stats.log2_max_MBps)
		return 0;

	return bytes >> adev->mm_stats.log2_max_MBps;
}
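
/* Worked example of the currency, with an assumed log2_max_MBps of 6
 * (i.e. roughly 64 MB/s of measured copy throughput): one second of wall
 * clock adds 1,000,000 to accum_us, and us_to_bytes() converts that into
 * 1,000,000 << 6 = 64,000,000 bytes of move budget, i.e. about 64 MB.
 * bytes_to_us() is the exact inverse and converts a byte budget back into
 * microseconds of accumulated time.
 */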
/* Returns how many bytes TTM can move right now. If no bytes can be moved,
 * it returns 0. If it returns non-zero, it's OK to move at least one buffer,
 * which means it can go over the threshold once. If that happens, the driver
 * will be in debt and no other buffer migrations can be done until that debt
 * is repaid.
 *
 * This approach allows moving a buffer of any size (it's important to allow
 * that).
 *
 * The currency is simply time in microseconds and it increases as the clock
 * ticks. The accumulated microseconds (us) are converted to bytes and
 * returned.
 */
static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,
					      u64 *max_bytes,
					      u64 *max_vis_bytes)
{
	s64 time_us, increment_us;
	u64 free_vram, total_vram, used_vram;
	/* Allow a maximum of 200 accumulated ms. This is basically per-IB
	 * throttling.
	 *
	 * It means that in order to get full max MBps, at least 5 IBs per
	 * second must be submitted and not more than 200ms apart from each
	 * other.
	 */
	const s64 us_upper_bound = 200000;

	if (!adev->mm_stats.log2_max_MBps) {
		*max_bytes = 0;
		*max_vis_bytes = 0;
		return;
	}

	total_vram = adev->gmc.real_vram_size - atomic64_read(&adev->vram_pin_size);
	used_vram = ttm_resource_manager_usage(&adev->mman.vram_mgr.manager);
	free_vram = used_vram >= total_vram ? 0 : total_vram - used_vram;

	spin_lock(&adev->mm_stats.lock);

	/* Increase the amount of accumulated us. */
	time_us = ktime_to_us(ktime_get());
	increment_us = time_us - adev->mm_stats.last_update_us;
	adev->mm_stats.last_update_us = time_us;
	adev->mm_stats.accum_us = min(adev->mm_stats.accum_us + increment_us,
				      us_upper_bound);

	/* This prevents the short period of low performance when the VRAM
	 * usage is low and the driver is in debt or doesn't have enough
	 * accumulated us to fill VRAM quickly.
	 *
	 * The situation can occur in these cases:
	 * - a lot of VRAM is freed by userspace
	 * - the presence of a big buffer causes a lot of evictions
	 *   (solution: split buffers into smaller ones)
	 *
	 * If 128 MB or 1/8th of VRAM is free, start filling it now by setting
	 * accum_us to a positive number.
	 */
	if (free_vram >= 128 * 1024 * 1024 || free_vram >= total_vram / 8) {
		s64 min_us;

		/* Be more aggressive on dGPUs. Try to fill a portion of free
		 * VRAM now.
		 */
		if (!(adev->flags & AMD_IS_APU))
			min_us = bytes_to_us(adev, free_vram / 4);
		else
			min_us = 0; /* Reset accum_us on APUs. */

		adev->mm_stats.accum_us = max(min_us, adev->mm_stats.accum_us);
	}

	/* This is set to 0 if the driver is in debt to disallow (optional)
	 * buffer moves.
	 */
	*max_bytes = us_to_bytes(adev, adev->mm_stats.accum_us);

	/* Do the same for visible VRAM if half of it is free */
	if (!amdgpu_gmc_vram_full_visible(&adev->gmc)) {
		u64 total_vis_vram = adev->gmc.visible_vram_size;
		u64 used_vis_vram =
			amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr);

		if (used_vis_vram < total_vis_vram) {
			u64 free_vis_vram = total_vis_vram - used_vis_vram;
			adev->mm_stats.accum_us_vis = min(adev->mm_stats.accum_us_vis +
							  increment_us, us_upper_bound);

			if (free_vis_vram >= total_vis_vram / 2)
				adev->mm_stats.accum_us_vis =
					max(bytes_to_us(adev, free_vis_vram / 2),
					    adev->mm_stats.accum_us_vis);
		}

		*max_vis_bytes = us_to_bytes(adev, adev->mm_stats.accum_us_vis);
	} else {
		*max_vis_bytes = 0;
	}

	spin_unlock(&adev->mm_stats.lock);
}
/* Report how many bytes have really been moved for the last command
 * submission. This can result in a debt that can stop buffer migrations
 * temporarily.
 */
void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
				  u64 num_vis_bytes)
{
	spin_lock(&adev->mm_stats.lock);
	adev->mm_stats.accum_us -= bytes_to_us(adev, num_bytes);
	adev->mm_stats.accum_us_vis -= bytes_to_us(adev, num_vis_bytes);
	spin_unlock(&adev->mm_stats.lock);
}
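
/* Per-BO validation callback. It is used both for the BOs on the CS list
 * and, through amdgpu_vm_validate_pt_bos(), for the page table BOs of the
 * VM. The move budget computed by amdgpu_cs_get_threshold_for_moves()
 * decides whether the preferred or only the currently allowed domains may
 * be tried.
 */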
static int amdgpu_cs_bo_validate(void *param, struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct amdgpu_cs_parser *p = param;
	struct ttm_operation_ctx ctx = {
		.interruptible = true,
		.no_wait_gpu = false,
		.resv = bo->tbo.base.resv
	};
	uint32_t domain;
	int r;

	if (bo->tbo.pin_count)
		return 0;

	/* Don't move this buffer if we have depleted our allowance
	 * to move it. Don't move anything if the threshold is zero.
	 */
	if (p->bytes_moved < p->bytes_moved_threshold &&
	    (!bo->tbo.base.dma_buf ||
	     list_empty(&bo->tbo.base.dma_buf->attachments))) {
		if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
		    (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)) {
			/* And don't move a CPU_ACCESS_REQUIRED BO to limited
			 * visible VRAM if we've depleted our allowance to do
			 * that.
			 */
			if (p->bytes_moved_vis < p->bytes_moved_vis_threshold)
				domain = bo->preferred_domains;
			else
				domain = bo->allowed_domains;
		} else {
			domain = bo->preferred_domains;
		}
	} else {
		domain = bo->allowed_domains;
	}

retry:
	amdgpu_bo_placement_from_domain(bo, domain);
	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);

	p->bytes_moved += ctx.bytes_moved;
	if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
	    amdgpu_bo_in_cpu_visible_vram(bo))
		p->bytes_moved_vis += ctx.bytes_moved;

	if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
		domain = bo->allowed_domains;
		goto retry;
	}

	return r;
}
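
/* Validate all BOs on a reservation list. Userptr BOs whose backing pages
 * changed since registration are first moved to the CPU domain so that the
 * following validation rebinds them to the new pages.
 */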
static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
				   struct list_head *validated)
{
	struct ttm_operation_ctx ctx = { true, false };
	struct amdgpu_bo_list_entry *lobj;
	int r;

	list_for_each_entry(lobj, validated, tv.head) {
		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(lobj->tv.bo);
		struct mm_struct *usermm;

		usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm);
		if (usermm && usermm != current->mm)
			return -EPERM;

		if (amdgpu_ttm_tt_is_userptr(bo->tbo.ttm) &&
		    lobj->user_invalidated && lobj->user_pages) {
			amdgpu_bo_placement_from_domain(bo,
							AMDGPU_GEM_DOMAIN_CPU);
			r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
			if (r)
				return r;

			amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm,
						     lobj->user_pages);
		}

		r = amdgpu_cs_bo_validate(p, bo);
		if (r)
			return r;

		kvfree(lobj->user_pages);
		lobj->user_pages = NULL;
	}
	return 0;
}
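
/* Collect all BOs referenced by the CS (BO list, VM page directory and the
 * optional user fence BO), pin down the userptr pages, reserve everything
 * under a common ww_acquire ticket and validate the placements within the
 * move budget. On success the buffers stay reserved and the bo_list mutex
 * stays held until amdgpu_cs_submit() or amdgpu_cs_parser_fini().
 */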
static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
				union drm_amdgpu_cs *cs)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_list_entry *e;
	struct list_head duplicates;
	struct amdgpu_bo *gds;
	struct amdgpu_bo *gws;
	struct amdgpu_bo *oa;
	int r;

	INIT_LIST_HEAD(&p->validated);

	/* p->bo_list could already be assigned if AMDGPU_CHUNK_ID_BO_HANDLES is present */
	if (cs->in.bo_list_handle) {
		if (p->bo_list)
			return -EINVAL;

		r = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle,
				       &p->bo_list);
		if (r)
			return r;
	} else if (!p->bo_list) {
		/* Create an empty bo_list when no handle is provided */
		r = amdgpu_bo_list_create(p->adev, p->filp, NULL, 0,
					  &p->bo_list);
		if (r)
			return r;
	}

	mutex_lock(&p->bo_list->bo_list_mutex);

	/* One for TTM and one for the CS job */
	amdgpu_bo_list_for_each_entry(e, p->bo_list)
		e->tv.num_shared = 2;

	amdgpu_bo_list_get_list(p->bo_list, &p->validated);

	INIT_LIST_HEAD(&duplicates);
	amdgpu_vm_get_pd_bo(&fpriv->vm, &p->validated, &p->vm_pd);

	if (p->uf_entry.tv.bo && !ttm_to_amdgpu_bo(p->uf_entry.tv.bo)->parent)
		list_add(&p->uf_entry.tv.head, &p->validated);

	/* Get userptr backing pages. If pages are updated after registered
	 * in amdgpu_gem_userptr_ioctl(), amdgpu_cs_list_validate() will do
	 * amdgpu_ttm_backend_bind() to flush and invalidate new pages
	 */
	amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
		bool userpage_invalidated = false;
		int i;

		e->user_pages = kvmalloc_array(bo->tbo.ttm->num_pages,
					       sizeof(struct page *),
					       GFP_KERNEL | __GFP_ZERO);
		if (!e->user_pages) {
			DRM_ERROR("kvmalloc_array failure\n");
			r = -ENOMEM;
			goto out_free_user_pages;
		}

		r = amdgpu_ttm_tt_get_user_pages(bo, e->user_pages);
		if (r) {
			kvfree(e->user_pages);
			e->user_pages = NULL;
			goto out_free_user_pages;
		}

		for (i = 0; i < bo->tbo.ttm->num_pages; i++) {
			if (bo->tbo.ttm->pages[i] != e->user_pages[i]) {
				userpage_invalidated = true;
				break;
			}
		}
		e->user_invalidated = userpage_invalidated;
	}

	r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true,
				   &duplicates);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS)
			DRM_ERROR("ttm_eu_reserve_buffers failed.\n");
		goto out_free_user_pages;
	}

	amdgpu_bo_list_for_each_entry(e, p->bo_list) {
		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);

		e->bo_va = amdgpu_vm_bo_find(vm, bo);
	}

	/* Move fence waiting after getting reservation lock of
	 * PD root. Then there is no need on a ctx mutex lock.
	 */
	r = amdgpu_ctx_wait_prev_fence(p->ctx, p->entity);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS)
			DRM_ERROR("amdgpu_ctx_wait_prev_fence failed.\n");
		goto error_validate;
	}

	amdgpu_cs_get_threshold_for_moves(p->adev, &p->bytes_moved_threshold,
					  &p->bytes_moved_vis_threshold);
	p->bytes_moved = 0;
	p->bytes_moved_vis = 0;

	r = amdgpu_vm_validate_pt_bos(p->adev, &fpriv->vm,
				      amdgpu_cs_bo_validate, p);
	if (r) {
		DRM_ERROR("amdgpu_vm_validate_pt_bos() failed.\n");
		goto error_validate;
	}

	r = amdgpu_cs_list_validate(p, &duplicates);
	if (r)
		goto error_validate;

	r = amdgpu_cs_list_validate(p, &p->validated);
	if (r)
		goto error_validate;

	amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved,
				     p->bytes_moved_vis);

	gds = p->bo_list->gds_obj;
	gws = p->bo_list->gws_obj;
	oa = p->bo_list->oa_obj;

	if (gds) {
		p->job->gds_base = amdgpu_bo_gpu_offset(gds) >> PAGE_SHIFT;
		p->job->gds_size = amdgpu_bo_size(gds) >> PAGE_SHIFT;
	}
	if (gws) {
		p->job->gws_base = amdgpu_bo_gpu_offset(gws) >> PAGE_SHIFT;
		p->job->gws_size = amdgpu_bo_size(gws) >> PAGE_SHIFT;
	}
	if (oa) {
		p->job->oa_base = amdgpu_bo_gpu_offset(oa) >> PAGE_SHIFT;
		p->job->oa_size = amdgpu_bo_size(oa) >> PAGE_SHIFT;
	}

	if (!r && p->uf_entry.tv.bo) {
		struct amdgpu_bo *uf = ttm_to_amdgpu_bo(p->uf_entry.tv.bo);

		r = amdgpu_ttm_alloc_gart(&uf->tbo);
		p->job->uf_addr += amdgpu_bo_gpu_offset(uf);
	}

error_validate:
	if (r)
		ttm_eu_backoff_reservation(&p->ticket, &p->validated);

out_free_user_pages:
	if (r) {
		amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
			struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);

			if (!e->user_pages)
				continue;
			amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
			kvfree(e->user_pages);
			e->user_pages = NULL;
		}
		mutex_unlock(&p->bo_list->bo_list_mutex);
	}
	return r;
}
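
/* Make the job wait for the relevant fences on all reserved BOs: everything
 * for explicitly synced BOs, and everything not coming from this VM owner
 * (AMDGPU_SYNC_NE_OWNER) for implicitly synced ones.
 */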
static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct amdgpu_bo_list_entry *e;
	int r;

	list_for_each_entry(e, &p->validated, tv.head) {
		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
		struct dma_resv *resv = bo->tbo.base.resv;
		enum amdgpu_sync_mode sync_mode;

		sync_mode = amdgpu_bo_explicit_sync(bo) ?
			AMDGPU_SYNC_EXPLICIT : AMDGPU_SYNC_NE_OWNER;
		r = amdgpu_sync_resv(p->adev, &p->job->sync, resv, sync_mode,
				     &fpriv->vm);
		if (r)
			return r;
	}

	return 0;
}
/**
 * amdgpu_cs_parser_fini() - clean parser states
 * @parser: parser structure holding parsing context.
 * @error: error number
 * @backoff: indicator to backoff the reservation
 *
 * If error is set then unvalidate buffer, otherwise just free memory
 * used by parsing context.
 */
static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error,
				  bool backoff)
{
	unsigned i;

	if (error && backoff) {
		ttm_eu_backoff_reservation(&parser->ticket,
					   &parser->validated);
		mutex_unlock(&parser->bo_list->bo_list_mutex);
	}

	for (i = 0; i < parser->num_post_deps; i++) {
		drm_syncobj_put(parser->post_deps[i].syncobj);
		kfree(parser->post_deps[i].chain);
	}
	kfree(parser->post_deps);

	dma_fence_put(parser->fence);

	if (parser->ctx) {
		mutex_unlock(&parser->ctx->lock);
		amdgpu_ctx_put(parser->ctx);
	}
	if (parser->bo_list)
		amdgpu_bo_list_put(parser->bo_list);

	for (i = 0; i < parser->nchunks; i++)
		kvfree(parser->chunks[i].kdata);
	kvfree(parser->chunks);
	if (parser->job)
		amdgpu_job_free(parser->job);
	if (parser->uf_entry.tv.bo) {
		struct amdgpu_bo *uf = ttm_to_amdgpu_bo(parser->uf_entry.tv.bo);

		amdgpu_bo_unref(&uf);
	}
}
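
/* Handle the VM dependent parts of the submission: parse or patch the IBs
 * for rings that need VM emulation (UVD/VCE), update the page tables for
 * all BOs and make the job wait for the page table updates to finish.
 */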
static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
{
	struct amdgpu_ring *ring = to_amdgpu_ring(p->entity->rq->sched);
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct amdgpu_device *adev = p->adev;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_list_entry *e;
	struct amdgpu_bo_va *bo_va;
	struct amdgpu_bo *bo;
	int r;

	/* Only for UVD/VCE VM emulation */
	if (ring->funcs->parse_cs || ring->funcs->patch_cs_in_place) {
		unsigned i, j;

		for (i = 0, j = 0; i < p->nchunks && j < p->job->num_ibs; i++) {
			struct drm_amdgpu_cs_chunk_ib *chunk_ib;
			struct amdgpu_bo_va_mapping *m;
			struct amdgpu_bo *aobj = NULL;
			struct amdgpu_cs_chunk *chunk;
			uint64_t offset, va_start;
			struct amdgpu_ib *ib;
			uint8_t *kptr;

			chunk = &p->chunks[i];
			ib = &p->job->ibs[j];
			chunk_ib = chunk->kdata;

			if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB)
				continue;

			va_start = chunk_ib->va_start & AMDGPU_GMC_HOLE_MASK;
			r = amdgpu_cs_find_mapping(p, va_start, &aobj, &m);
			if (r) {
				DRM_ERROR("IB va_start is invalid\n");
				return r;
			}

			if ((va_start + chunk_ib->ib_bytes) >
			    (m->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
				DRM_ERROR("IB va_start+ib_bytes is invalid\n");
				return -EINVAL;
			}

			/* the IB should be reserved at this point */
			r = amdgpu_bo_kmap(aobj, (void **)&kptr);
			if (r)
				return r;

			offset = m->start * AMDGPU_GPU_PAGE_SIZE;
			kptr += va_start - offset;

			if (ring->funcs->parse_cs) {
				memcpy(ib->ptr, kptr, chunk_ib->ib_bytes);
				amdgpu_bo_kunmap(aobj);

				r = amdgpu_ring_parse_cs(ring, p, p->job, ib);
				if (r)
					return r;
			} else {
				ib->ptr = (uint32_t *)kptr;
				r = amdgpu_ring_patch_cs_in_place(ring, p, p->job, ib);
				amdgpu_bo_kunmap(aobj);
				if (r)
					return r;
			}

			j++;
		}
	}

	if (!p->job->vm)
		return amdgpu_cs_sync_rings(p);

	r = amdgpu_vm_clear_freed(adev, vm, NULL);
	if (r)
		return r;

	r = amdgpu_vm_bo_update(adev, fpriv->prt_va, false);
	if (r)
		return r;

	r = amdgpu_sync_fence(&p->job->sync, fpriv->prt_va->last_pt_update);
	if (r)
		return r;

	if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
		bo_va = fpriv->csa_va;

		r = amdgpu_vm_bo_update(adev, bo_va, false);
		if (r)
			return r;

		r = amdgpu_sync_fence(&p->job->sync, bo_va->last_pt_update);
		if (r)
			return r;
	}

	amdgpu_bo_list_for_each_entry(e, p->bo_list) {
		/* ignore duplicates */
		bo = ttm_to_amdgpu_bo(e->tv.bo);
		if (!bo)
			continue;

		bo_va = e->bo_va;
		if (bo_va == NULL)
			continue;

		r = amdgpu_vm_bo_update(adev, bo_va, false);
		if (r) {
			mutex_unlock(&p->bo_list->bo_list_mutex);
			return r;
		}

		r = amdgpu_sync_fence(&p->job->sync, bo_va->last_pt_update);
		if (r) {
			mutex_unlock(&p->bo_list->bo_list_mutex);
			return r;
		}
	}

	r = amdgpu_vm_handle_moved(adev, vm);
	if (r)
		return r;

	r = amdgpu_vm_update_pdes(adev, vm, false);
	if (r)
		return r;

	r = amdgpu_sync_fence(&p->job->sync, vm->last_update);
	if (r)
		return r;

	p->job->vm_pd_addr = amdgpu_gmc_pd_addr(vm->root.bo);

	if (amdgpu_vm_debug) {
		/* Invalidate all BOs to test for userspace bugs */
		amdgpu_bo_list_for_each_entry(e, p->bo_list) {
			struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);

			/* ignore duplicates */
			if (!bo)
				continue;

			amdgpu_vm_bo_invalidate(adev, bo, false);
		}
	}

	return amdgpu_cs_sync_rings(p);
}
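
/* Fill the job's IB array from the AMDGPU_CHUNK_ID_IB chunks. All IBs of
 * one submission must target the same scheduler entity. For rings with a CS
 * parser a kernel IB of ib_bytes is allocated so amdgpu_cs_vm_handling()
 * can copy and parse the commands; otherwise the IB executes directly from
 * the GPU virtual address.
 */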
static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
			     struct amdgpu_cs_parser *parser)
{
	struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	int r, ce_preempt = 0, de_preempt = 0;
	struct amdgpu_ring *ring;
	int i, j;

	for (i = 0, j = 0; i < parser->nchunks && j < parser->job->num_ibs; i++) {
		struct amdgpu_cs_chunk *chunk;
		struct amdgpu_ib *ib;
		struct drm_amdgpu_cs_chunk_ib *chunk_ib;
		struct drm_sched_entity *entity;

		chunk = &parser->chunks[i];
		ib = &parser->job->ibs[j];
		chunk_ib = (struct drm_amdgpu_cs_chunk_ib *)chunk->kdata;

		if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB)
			continue;

		if (chunk_ib->ip_type == AMDGPU_HW_IP_GFX &&
		    (amdgpu_mcbp || amdgpu_sriov_vf(adev))) {
			if (chunk_ib->flags & AMDGPU_IB_FLAG_PREEMPT) {
				if (chunk_ib->flags & AMDGPU_IB_FLAG_CE)
					ce_preempt++;
				else
					de_preempt++;
			}

			/* each GFX command submit allows 0 or 1 IB preemptible for CE & DE */
			if (ce_preempt > 1 || de_preempt > 1)
				return -EINVAL;
		}

		r = amdgpu_ctx_get_entity(parser->ctx, chunk_ib->ip_type,
					  chunk_ib->ip_instance, chunk_ib->ring,
					  &entity);
		if (r)
			return r;

		if (chunk_ib->flags & AMDGPU_IB_FLAG_PREAMBLE)
			parser->job->preamble_status |=
				AMDGPU_PREAMBLE_IB_PRESENT;

		if (parser->entity && parser->entity != entity)
			return -EINVAL;

		/* Return if there is no run queue associated with this entity.
		 * Possibly because of disabled HW IP.
		 */
		if (entity->rq == NULL)
			return -EINVAL;

		parser->entity = entity;

		ring = to_amdgpu_ring(entity->rq->sched);
		r = amdgpu_ib_get(adev, vm, ring->funcs->parse_cs ?
				  chunk_ib->ib_bytes : 0,
				  AMDGPU_IB_POOL_DELAYED, ib);
		if (r) {
			DRM_ERROR("Failed to get ib !\n");
			return r;
		}

		ib->gpu_addr = chunk_ib->va_start;
		ib->length_dw = chunk_ib->ib_bytes / 4;
		ib->flags = chunk_ib->flags;

		j++;
	}

	/* MM engine doesn't support user fences */
	ring = to_amdgpu_ring(parser->entity->rq->sched);
	if (parser->job->uf_addr && ring->funcs->no_user_fence)
		return -EINVAL;

	return 0;
}
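
/* Add the fence dependencies described by an AMDGPU_CHUNK_ID_DEPENDENCIES
 * or AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES chunk to the job's sync object.
 * For scheduled dependencies it is enough to wait for the fence to be
 * scheduled rather than finished.
 */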
static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p,
				       struct amdgpu_cs_chunk *chunk)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	unsigned num_deps;
	int i, r;
	struct drm_amdgpu_cs_chunk_dep *deps;

	deps = (struct drm_amdgpu_cs_chunk_dep *)chunk->kdata;
	num_deps = chunk->length_dw * 4 /
		sizeof(struct drm_amdgpu_cs_chunk_dep);

	for (i = 0; i < num_deps; ++i) {
		struct amdgpu_ctx *ctx;
		struct drm_sched_entity *entity;
		struct dma_fence *fence;

		ctx = amdgpu_ctx_get(fpriv, deps[i].ctx_id);
		if (ctx == NULL)
			return -EINVAL;

		r = amdgpu_ctx_get_entity(ctx, deps[i].ip_type,
					  deps[i].ip_instance,
					  deps[i].ring, &entity);
		if (r) {
			amdgpu_ctx_put(ctx);
			return r;
		}

		fence = amdgpu_ctx_get_fence(ctx, entity, deps[i].handle);
		amdgpu_ctx_put(ctx);

		if (IS_ERR(fence))
			return PTR_ERR(fence);
		else if (!fence)
			continue;

		if (chunk->chunk_id == AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES) {
			struct drm_sched_fence *s_fence;
			struct dma_fence *old = fence;

			s_fence = to_drm_sched_fence(fence);
			fence = dma_fence_get(&s_fence->scheduled);
			dma_fence_put(old);
		}

		r = amdgpu_sync_fence(&p->job->sync, fence);
		dma_fence_put(fence);
		if (r)
			return r;
	}
	return 0;
}
static int amdgpu_syncobj_lookup_and_add_to_sync(struct amdgpu_cs_parser *p,
						 uint32_t handle, u64 point,
						 u64 flags)
{
	struct dma_fence *fence;
	int r;

	r = drm_syncobj_find_fence(p->filp, handle, point, flags, &fence);
	if (r) {
		DRM_ERROR("syncobj %u failed to find fence @ %llu (%d)!\n",
			  handle, point, r);
		return r;
	}

	r = amdgpu_sync_fence(&p->job->sync, fence);
	dma_fence_put(fence);

	return r;
}
static int amdgpu_cs_process_syncobj_in_dep(struct amdgpu_cs_parser *p,
					    struct amdgpu_cs_chunk *chunk)
{
	struct drm_amdgpu_cs_chunk_sem *deps;
	unsigned num_deps;
	int i, r;

	deps = (struct drm_amdgpu_cs_chunk_sem *)chunk->kdata;
	num_deps = chunk->length_dw * 4 /
		sizeof(struct drm_amdgpu_cs_chunk_sem);
	for (i = 0; i < num_deps; ++i) {
		r = amdgpu_syncobj_lookup_and_add_to_sync(p, deps[i].handle,
							  0, 0);
		if (r)
			return r;
	}

	return 0;
}
static int amdgpu_cs_process_syncobj_timeline_in_dep(struct amdgpu_cs_parser *p,
						     struct amdgpu_cs_chunk *chunk)
{
	struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps;
	unsigned num_deps;
	int i, r;

	syncobj_deps = (struct drm_amdgpu_cs_chunk_syncobj *)chunk->kdata;
	num_deps = chunk->length_dw * 4 /
		sizeof(struct drm_amdgpu_cs_chunk_syncobj);
	for (i = 0; i < num_deps; ++i) {
		r = amdgpu_syncobj_lookup_and_add_to_sync(p,
							  syncobj_deps[i].handle,
							  syncobj_deps[i].point,
							  syncobj_deps[i].flags);
		if (r)
			return r;
	}

	return 0;
}
static int amdgpu_cs_process_syncobj_out_dep(struct amdgpu_cs_parser *p,
					     struct amdgpu_cs_chunk *chunk)
{
	struct drm_amdgpu_cs_chunk_sem *deps;
	unsigned num_deps;
	int i;

	deps = (struct drm_amdgpu_cs_chunk_sem *)chunk->kdata;
	num_deps = chunk->length_dw * 4 /
		sizeof(struct drm_amdgpu_cs_chunk_sem);

	if (p->post_deps)
		return -EINVAL;

	p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
				     GFP_KERNEL);
	p->num_post_deps = 0;

	if (!p->post_deps)
		return -ENOMEM;

	for (i = 0; i < num_deps; ++i) {
		p->post_deps[i].syncobj =
			drm_syncobj_find(p->filp, deps[i].handle);
		if (!p->post_deps[i].syncobj)
			return -EINVAL;
		p->post_deps[i].chain = NULL;
		p->post_deps[i].point = 0;
		p->num_post_deps++;
	}

	return 0;
}
static int amdgpu_cs_process_syncobj_timeline_out_dep(struct amdgpu_cs_parser *p,
						      struct amdgpu_cs_chunk *chunk)
{
	struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps;
	unsigned num_deps;
	int i;

	syncobj_deps = (struct drm_amdgpu_cs_chunk_syncobj *)chunk->kdata;
	num_deps = chunk->length_dw * 4 /
		sizeof(struct drm_amdgpu_cs_chunk_syncobj);

	if (p->post_deps)
		return -EINVAL;

	p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
				     GFP_KERNEL);
	p->num_post_deps = 0;

	if (!p->post_deps)
		return -ENOMEM;

	for (i = 0; i < num_deps; ++i) {
		struct amdgpu_cs_post_dep *dep = &p->post_deps[i];

		dep->chain = NULL;
		if (syncobj_deps[i].point) {
			dep->chain = dma_fence_chain_alloc();
			if (!dep->chain)
				return -ENOMEM;
		}

		dep->syncobj = drm_syncobj_find(p->filp,
						syncobj_deps[i].handle);
		if (!dep->syncobj) {
			dma_fence_chain_free(dep->chain);
			return -EINVAL;
		}
		dep->point = syncobj_deps[i].point;
		p->num_post_deps++;
	}

	return 0;
}
static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
				  struct amdgpu_cs_parser *p)
{
	int i, r = 0;

	/* TODO: Investigate why we still need the context lock */
	mutex_unlock(&p->ctx->lock);

	for (i = 0; i < p->nchunks; ++i) {
		struct amdgpu_cs_chunk *chunk;

		chunk = &p->chunks[i];

		switch (chunk->chunk_id) {
		case AMDGPU_CHUNK_ID_DEPENDENCIES:
		case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES:
			r = amdgpu_cs_process_fence_dep(p, chunk);
			if (r)
				goto out;
			break;
		case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
			r = amdgpu_cs_process_syncobj_in_dep(p, chunk);
			if (r)
				goto out;
			break;
		case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
			r = amdgpu_cs_process_syncobj_out_dep(p, chunk);
			if (r)
				goto out;
			break;
		case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT:
			r = amdgpu_cs_process_syncobj_timeline_in_dep(p, chunk);
			if (r)
				goto out;
			break;
		case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL:
			r = amdgpu_cs_process_syncobj_timeline_out_dep(p, chunk);
			if (r)
				goto out;
			break;
		}
	}

out:
	mutex_lock(&p->ctx->lock);
	return r;
}
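
/* Signal the syncobjs collected from the SYNCOBJ_OUT and
 * SYNCOBJ_TIMELINE_SIGNAL chunks: timeline points are added through a
 * pre-allocated dma_fence_chain node, plain syncobjs simply get their fence
 * replaced with the CS fence.
 */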
static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p)
{
	int i;

	for (i = 0; i < p->num_post_deps; ++i) {
		if (p->post_deps[i].chain && p->post_deps[i].point) {
			drm_syncobj_add_point(p->post_deps[i].syncobj,
					      p->post_deps[i].chain,
					      p->fence, p->post_deps[i].point);
			p->post_deps[i].chain = NULL;
		} else {
			drm_syncobj_replace_fence(p->post_deps[i].syncobj,
						  p->fence);
		}
	}
}
static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
			    union drm_amdgpu_cs *cs)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct drm_sched_entity *entity = p->entity;
	struct amdgpu_bo_list_entry *e;
	struct amdgpu_job *job;
	uint64_t seq;
	int r;

	job = p->job;
	p->job = NULL;

	r = drm_sched_job_init(&job->base, entity, &fpriv->vm);
	if (r)
		goto error_unlock;

	drm_sched_job_arm(&job->base);

	/* No memory allocation is allowed while holding the notifier lock.
	 * The lock is held until amdgpu_cs_submit is finished and fence is
	 * added to BOs.
	 */
	mutex_lock(&p->adev->notifier_lock);

	/* If userptr are invalidated after amdgpu_cs_parser_bos(), return
	 * -EAGAIN, drmIoctl in libdrm will restart the amdgpu_cs_ioctl.
	 */
	amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);

		r |= !amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
	}
	if (r) {
		r = -EAGAIN;
		goto error_abort;
	}

	p->fence = dma_fence_get(&job->base.s_fence->finished);

	seq = amdgpu_ctx_add_fence(p->ctx, entity, p->fence);
	amdgpu_cs_post_dependencies(p);

	if ((job->preamble_status & AMDGPU_PREAMBLE_IB_PRESENT) &&
	    !p->ctx->preamble_presented) {
		job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT_FIRST;
		p->ctx->preamble_presented = true;
	}

	cs->out.handle = seq;
	job->uf_sequence = seq;

	amdgpu_job_free_resources(job);

	trace_amdgpu_cs_ioctl(job);
	amdgpu_vm_bo_trace_cs(&fpriv->vm, &p->ticket);
	drm_sched_entity_push_job(&job->base);

	amdgpu_vm_move_to_lru_tail(p->adev, &fpriv->vm);

	/* Make sure all BOs are remembered as writers */
	amdgpu_bo_list_for_each_entry(e, p->bo_list)
		e->tv.num_shared = 0;

	ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence);
	mutex_unlock(&p->adev->notifier_lock);
	mutex_unlock(&p->bo_list->bo_list_mutex);

	return 0;

error_abort:
	drm_sched_job_cleanup(&job->base);
	mutex_unlock(&p->adev->notifier_lock);

error_unlock:
	amdgpu_job_free(job);
	return r;
}
static void trace_amdgpu_cs_ibs(struct amdgpu_cs_parser *parser)
{
	int i;

	if (!trace_amdgpu_cs_enabled())
		return;

	for (i = 0; i < parser->job->num_ibs; i++)
		trace_amdgpu_cs(parser, i);
}
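
/* Main entry point of the CS IOCTL. The stages run in a fixed order:
 * amdgpu_cs_parser_init() copies in the chunks, amdgpu_cs_ib_fill() sets up
 * the IBs, amdgpu_cs_dependencies() gathers the wait fences,
 * amdgpu_cs_parser_bos() reserves and validates the buffers,
 * amdgpu_cs_vm_handling() updates the page tables and amdgpu_cs_submit()
 * pushes the job to the scheduler.
 */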
int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	union drm_amdgpu_cs *cs = data;
	struct amdgpu_cs_parser parser = {};
	bool reserved_buffers = false;
	int r;

	if (amdgpu_ras_intr_triggered())
		return -EHWPOISON;

	if (!adev->accel_working)
		return -EBUSY;

	parser.adev = adev;
	parser.filp = filp;

	r = amdgpu_cs_parser_init(&parser, data);
	if (r) {
		if (printk_ratelimit())
			DRM_ERROR("Failed to initialize parser %d!\n", r);
		goto out;
	}

	r = amdgpu_cs_ib_fill(adev, &parser);
	if (r)
		goto out;

	r = amdgpu_cs_dependencies(adev, &parser);
	if (r) {
		DRM_ERROR("Failed in the dependencies handling %d!\n", r);
		goto out;
	}

	r = amdgpu_cs_parser_bos(&parser, data);
	if (r) {
		if (r == -ENOMEM)
			DRM_ERROR("Not enough memory for command submission!\n");
		else if (r != -ERESTARTSYS && r != -EAGAIN)
			DRM_ERROR("Failed to process the buffer list %d!\n", r);
		goto out;
	}

	reserved_buffers = true;

	trace_amdgpu_cs_ibs(&parser);

	r = amdgpu_cs_vm_handling(&parser);
	if (r)
		goto out;

	r = amdgpu_cs_submit(&parser, cs);
out:
	amdgpu_cs_parser_fini(&parser, r, reserved_buffers);

	return r;
}
/**
 * amdgpu_cs_wait_ioctl - wait for a command submission to finish
 *
 * @dev: drm device
 * @data: data from userspace
 * @filp: file private
 *
 * Wait for the command submission identified by handle to finish.
 */
int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *filp)
{
	union drm_amdgpu_wait_cs *wait = data;
	unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout);
	struct drm_sched_entity *entity;
	struct amdgpu_ctx *ctx;
	struct dma_fence *fence;
	long r;

	ctx = amdgpu_ctx_get(filp->driver_priv, wait->in.ctx_id);
	if (ctx == NULL)
		return -EINVAL;

	r = amdgpu_ctx_get_entity(ctx, wait->in.ip_type, wait->in.ip_instance,
				  wait->in.ring, &entity);
	if (r) {
		amdgpu_ctx_put(ctx);
		return r;
	}

	fence = amdgpu_ctx_get_fence(ctx, entity, wait->in.handle);
	if (IS_ERR(fence))
		r = PTR_ERR(fence);
	else if (fence) {
		r = dma_fence_wait_timeout(fence, true, timeout);
		if (r > 0 && fence->error)
			r = fence->error;
		dma_fence_put(fence);
	} else
		r = 1;

	amdgpu_ctx_put(ctx);
	if (r < 0)
		return r;

	memset(wait, 0, sizeof(*wait));
	wait->out.status = (r == 0);

	return 0;
}
/**
 * amdgpu_cs_get_fence - helper to get fence from drm_amdgpu_fence
 *
 * @adev: amdgpu device
 * @filp: file private
 * @user: drm_amdgpu_fence copied from user space
 */
static struct dma_fence *amdgpu_cs_get_fence(struct amdgpu_device *adev,
					     struct drm_file *filp,
					     struct drm_amdgpu_fence *user)
{
	struct drm_sched_entity *entity;
	struct amdgpu_ctx *ctx;
	struct dma_fence *fence;
	int r;

	ctx = amdgpu_ctx_get(filp->driver_priv, user->ctx_id);
	if (ctx == NULL)
		return ERR_PTR(-EINVAL);

	r = amdgpu_ctx_get_entity(ctx, user->ip_type, user->ip_instance,
				  user->ring, &entity);
	if (r) {
		amdgpu_ctx_put(ctx);
		return ERR_PTR(r);
	}

	fence = amdgpu_ctx_get_fence(ctx, entity, user->seq_no);
	amdgpu_ctx_put(ctx);

	return fence;
}
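
/**
 * amdgpu_cs_fence_to_handle_ioctl - convert a CS fence to another handle type
 *
 * @dev: drm device
 * @data: data from userspace
 * @filp: file private
 *
 * Convert the fence of a previous submission into a syncobj, a syncobj fd
 * or a sync_file fd, depending on info->in.what.
 */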
int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *filp)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	union drm_amdgpu_fence_to_handle *info = data;
	struct dma_fence *fence;
	struct drm_syncobj *syncobj;
	struct sync_file *sync_file;
	int fd, r;

	fence = amdgpu_cs_get_fence(adev, filp, &info->in.fence);
	if (IS_ERR(fence))
		return PTR_ERR(fence);

	if (!fence)
		fence = dma_fence_get_stub();

	switch (info->in.what) {
	case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ:
		r = drm_syncobj_create(&syncobj, 0, fence);
		dma_fence_put(fence);
		if (r)
			return r;
		r = drm_syncobj_get_handle(filp, syncobj, &info->out.handle);
		drm_syncobj_put(syncobj);
		return r;

	case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ_FD:
		r = drm_syncobj_create(&syncobj, 0, fence);
		dma_fence_put(fence);
		if (r)
			return r;
		r = drm_syncobj_get_fd(syncobj, (int *)&info->out.handle);
		drm_syncobj_put(syncobj);
		return r;

	case AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD:
		fd = get_unused_fd_flags(O_CLOEXEC);
		if (fd < 0) {
			dma_fence_put(fence);
			return fd;
		}

		sync_file = sync_file_create(fence);
		dma_fence_put(fence);
		if (!sync_file) {
			put_unused_fd(fd);
			return -ENOMEM;
		}

		fd_install(fd, sync_file->file);
		info->out.handle = fd;
		return 0;

	default:
		dma_fence_put(fence);
		return -EINVAL;
	}
}
/**
 * amdgpu_cs_wait_all_fences - wait on all fences to signal
 *
 * @adev: amdgpu device
 * @filp: file private
 * @wait: wait parameters
 * @fences: array of drm_amdgpu_fence
 */
static int amdgpu_cs_wait_all_fences(struct amdgpu_device *adev,
				     struct drm_file *filp,
				     union drm_amdgpu_wait_fences *wait,
				     struct drm_amdgpu_fence *fences)
{
	uint32_t fence_count = wait->in.fence_count;
	unsigned int i;
	long r = 1;

	for (i = 0; i < fence_count; i++) {
		struct dma_fence *fence;
		unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);

		fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
		if (IS_ERR(fence))
			return PTR_ERR(fence);
		else if (!fence)
			continue;

		r = dma_fence_wait_timeout(fence, true, timeout);
		dma_fence_put(fence);
		if (r < 0)
			return r;

		if (r == 0)
			break;

		if (fence->error)
			return fence->error;
	}

	memset(wait, 0, sizeof(*wait));
	wait->out.status = (r > 0);

	return 0;
}
/**
 * amdgpu_cs_wait_any_fence - wait on any fence to signal
 *
 * @adev: amdgpu device
 * @filp: file private
 * @wait: wait parameters
 * @fences: array of drm_amdgpu_fence
 */
static int amdgpu_cs_wait_any_fence(struct amdgpu_device *adev,
				    struct drm_file *filp,
				    union drm_amdgpu_wait_fences *wait,
				    struct drm_amdgpu_fence *fences)
{
	unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);
	uint32_t fence_count = wait->in.fence_count;
	uint32_t first = ~0;
	struct dma_fence **array;
	unsigned int i;
	long r;

	/* Prepare the fence array */
	array = kcalloc(fence_count, sizeof(struct dma_fence *), GFP_KERNEL);

	if (array == NULL)
		return -ENOMEM;

	for (i = 0; i < fence_count; i++) {
		struct dma_fence *fence;

		fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
		if (IS_ERR(fence)) {
			r = PTR_ERR(fence);
			goto err_free_fence_array;
		} else if (fence) {
			array[i] = fence;
		} else { /* NULL, the fence has been already signaled */
			r = 1;
			first = i;
			goto out;
		}
	}

	r = dma_fence_wait_any_timeout(array, fence_count, true, timeout,
				       &first);
	if (r < 0)
		goto err_free_fence_array;

out:
	memset(wait, 0, sizeof(*wait));
	wait->out.status = (r > 0);
	wait->out.first_signaled = first;

	if (first < fence_count && array[first])
		r = array[first]->error;
	else
		r = 0;

err_free_fence_array:
	for (i = 0; i < fence_count; i++)
		dma_fence_put(array[i]);
	kfree(array);

	return r;
}
/**
 * amdgpu_cs_wait_fences_ioctl - wait for multiple command submissions to finish
 *
 * @dev: drm device
 * @data: data from userspace
 * @filp: file private
 */
int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	union drm_amdgpu_wait_fences *wait = data;
	uint32_t fence_count = wait->in.fence_count;
	struct drm_amdgpu_fence *fences_user;
	struct drm_amdgpu_fence *fences;
	int r;

	/* Get the fences from userspace */
	fences = kmalloc_array(fence_count, sizeof(struct drm_amdgpu_fence),
			       GFP_KERNEL);
	if (fences == NULL)
		return -ENOMEM;

	fences_user = u64_to_user_ptr(wait->in.fences);
	if (copy_from_user(fences, fences_user,
			   sizeof(struct drm_amdgpu_fence) * fence_count)) {
		r = -EFAULT;
		goto err_free_fences;
	}

	if (wait->in.wait_all)
		r = amdgpu_cs_wait_all_fences(adev, filp, wait, fences);
	else
		r = amdgpu_cs_wait_any_fence(adev, filp, wait, fences);

err_free_fences:
	kfree(fences);

	return r;
}
/**
 * amdgpu_cs_find_mapping - find bo_va for VM address
 *
 * @parser: command submission parser context
 * @addr: VM address
 * @bo: resulting BO of the mapping found
 * @map: Placeholder to return found BO mapping
 *
 * Search the buffer objects in the command submission context for a certain
 * virtual memory address. Returns allocation structure when found, NULL
 * otherwise.
 */
int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
			   uint64_t addr, struct amdgpu_bo **bo,
			   struct amdgpu_bo_va_mapping **map)
{
	struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
	struct ttm_operation_ctx ctx = { false, false };
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_va_mapping *mapping;
	int r;

	addr /= AMDGPU_GPU_PAGE_SIZE;

	mapping = amdgpu_vm_bo_lookup_mapping(vm, addr);
	if (!mapping || !mapping->bo_va || !mapping->bo_va->base.bo)
		return -EINVAL;

	*bo = mapping->bo_va->base.bo;
	*map = mapping;

	/* Double check that the BO is reserved by this CS */
	if (dma_resv_locking_ctx((*bo)->tbo.base.resv) != &parser->ticket)
		return -EINVAL;

	if (!((*bo)->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) {
		(*bo)->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
		amdgpu_bo_placement_from_domain(*bo, (*bo)->allowed_domains);
		r = ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, &ctx);
		if (r)
			return r;
	}

	return amdgpu_ttm_alloc_gart(&(*bo)->tbo);
}