/*
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors:
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/dma-buf.h>

#include <drm/drm_drv.h>
#include <drm/amdgpu_drm.h>
#include <drm/drm_cache.h>

#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"
/**
 * DOC: amdgpu_object
 *
 * This defines the interfaces to operate on an &amdgpu_bo buffer object which
 * represents memory used by the driver (VRAM, system memory, etc.). The
 * driver provides DRM/GEM APIs to userspace. DRM/GEM APIs then use these
 * interfaces to create/destroy/set buffer objects which are then managed by
 * the kernel TTM memory manager.
 * The interfaces are also used internally by kernel clients, including gfx,
 * uvd, etc. for kernel managed allocations used by the GPU.
 */
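/*
 * Illustrative sketch (not part of the driver): the typical lifecycle of a
 * kernel-internal BO using the helpers below. Error handling is omitted and
 * "adev" is assumed to be a valid struct amdgpu_device pointer.
 *
 *	struct amdgpu_bo *bo = NULL;
 *	u64 gpu_addr;
 *	void *cpu_ptr;
 *
 *	amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
 *				AMDGPU_GEM_DOMAIN_GTT, &bo,
 *				&gpu_addr, &cpu_ptr);
 *	... use cpu_ptr and gpu_addr ...
 *	amdgpu_bo_free_kernel(&bo, &gpu_addr, &cpu_ptr);
 */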
static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);

	if (bo->tbo.base.import_attach)
		drm_prime_gem_destroy(&bo->tbo.base, bo->tbo.sg);
	drm_gem_object_release(&bo->tbo.base);
	amdgpu_bo_unref(&bo->parent);
	kvfree(bo);
}
static void amdgpu_bo_user_destroy(struct ttm_buffer_object *tbo)
{
	struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);
	struct amdgpu_bo_user *ubo;

	ubo = to_amdgpu_bo_user(bo);
	kfree(ubo->metadata);
	amdgpu_bo_destroy(tbo);
}
static void amdgpu_bo_vm_destroy(struct ttm_buffer_object *tbo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
	struct amdgpu_bo *shadow_bo = ttm_to_amdgpu_bo(tbo), *bo;
	struct amdgpu_bo_vm *vmbo;

	bo = shadow_bo->parent;
	vmbo = to_amdgpu_bo_vm(bo);
	/* in case amdgpu_device_recover_vram got a NULL bo->parent */
	if (!list_empty(&vmbo->shadow_list)) {
		mutex_lock(&adev->shadow_list_lock);
		list_del_init(&vmbo->shadow_list);
		mutex_unlock(&adev->shadow_list_lock);
	}

	amdgpu_bo_destroy(tbo);
}
/**
 * amdgpu_bo_is_amdgpu_bo - check if the buffer object is an &amdgpu_bo
 * @bo: buffer object to be checked
 *
 * Uses the destroy function associated with the object to determine if this
 * is an &amdgpu_bo.
 *
 * Returns:
 * true if the object belongs to &amdgpu_bo, false if not.
 */
bool amdgpu_bo_is_amdgpu_bo(struct ttm_buffer_object *bo)
{
	if (bo->destroy == &amdgpu_bo_destroy ||
	    bo->destroy == &amdgpu_bo_user_destroy ||
	    bo->destroy == &amdgpu_bo_vm_destroy)
		return true;

	return false;
}
/**
 * amdgpu_bo_placement_from_domain - set buffer's placement
 * @abo: &amdgpu_bo buffer object whose placement is to be set
 * @domain: requested domain
 *
 * Sets buffer's placement according to requested domain and the buffer's
 * flags.
 */
void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
	struct ttm_placement *placement = &abo->placement;
	struct ttm_place *places = abo->placements;
	u64 flags = abo->flags;
	u32 c = 0;

	if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
		unsigned int visible_pfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
		int8_t mem_id = KFD_XCP_MEM_ID(adev, abo->xcp_id);

		if (adev->gmc.mem_partitions && mem_id >= 0) {
			places[c].fpfn = adev->gmc.mem_partitions[mem_id].range.fpfn;
			/*
			 * memory partition range lpfn is inclusive start + size - 1
			 * TTM place lpfn is exclusive start + size
			 */
			places[c].lpfn = adev->gmc.mem_partitions[mem_id].range.lpfn + 1;
		} else {
			places[c].fpfn = 0;
			places[c].lpfn = 0;
		}
		places[c].mem_type = TTM_PL_VRAM;
		places[c].flags = 0;

		if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
			places[c].lpfn = min_not_zero(places[c].lpfn, visible_pfn);
		else
			places[c].flags |= TTM_PL_FLAG_TOPDOWN;

		if (flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
			places[c].flags |= TTM_PL_FLAG_CONTIGUOUS;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_DOORBELL) {
		places[c].mem_type = AMDGPU_PL_DOORBELL;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_GTT) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].mem_type =
			abo->flags & AMDGPU_GEM_CREATE_PREEMPTIBLE ?
			AMDGPU_PL_PREEMPT : TTM_PL_TT;
		places[c].flags = 0;
		/*
		 * When GTT is just an alternative to VRAM make sure that we
		 * only use it as fallback and still try to fill up VRAM first.
		 */
		if (domain & abo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM)
			places[c].flags |= TTM_PL_FLAG_FALLBACK;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_CPU) {
		places[c].mem_type = TTM_PL_SYSTEM;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_GDS) {
		places[c].mem_type = AMDGPU_PL_GDS;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_GWS) {
		places[c].mem_type = AMDGPU_PL_GWS;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_OA) {
		places[c].mem_type = AMDGPU_PL_OA;
		c++;
	}

	if (!c) {
		places[c].mem_type = TTM_PL_SYSTEM;
		c++;
	}

	BUG_ON(c > AMDGPU_BO_MAX_PLACEMENTS);

	placement->num_placement = c;
	placement->placement = places;
}
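/*
 * Illustrative sketch (not part of the driver): a caller that prefers VRAM
 * but can live with GTT builds a two-entry placement and lets TTM validate
 * it; the fallback flag set above keeps VRAM the first choice when VRAM is
 * among the BO's preferred domains.
 *
 *	amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
 *					     AMDGPU_GEM_DOMAIN_GTT);
 *	r = ttm_bo_validate(&abo->tbo, &abo->placement, &ctx);
 */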
/**
 * amdgpu_bo_create_reserved - create reserved BO for kernel use
 *
 * @adev: amdgpu device object
 * @size: size for the new BO
 * @align: alignment for the new BO
 * @domain: where to place it
 * @bo_ptr: used to initialize BOs in structures
 * @gpu_addr: GPU addr of the pinned BO
 * @cpu_addr: optional CPU address mapping
 *
 * Allocates and pins a BO for kernel internal use, and returns it still
 * reserved.
 *
 * Note: a new BO is only created if *bo_ptr points to NULL.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
			      unsigned long size, int align,
			      u32 domain, struct amdgpu_bo **bo_ptr,
			      u64 *gpu_addr, void **cpu_addr)
{
	struct amdgpu_bo_param bp;
	bool free = false;
	int r;

	if (!size) {
		amdgpu_bo_unref(bo_ptr);
		return 0;
	}

	memset(&bp, 0, sizeof(bp));
	bp.size = size;
	bp.byte_align = align;
	bp.domain = domain;
	bp.flags = cpu_addr ? AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED
		: AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
	bp.flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
	bp.type = ttm_bo_type_kernel;
	bp.resv = NULL;
	bp.bo_ptr_size = sizeof(struct amdgpu_bo);

	if (!*bo_ptr) {
		r = amdgpu_bo_create(adev, &bp, bo_ptr);
		if (r) {
			dev_err(adev->dev, "(%d) failed to allocate kernel bo\n",
				r);
			return r;
		}
		free = true;
	}

	r = amdgpu_bo_reserve(*bo_ptr, false);
	if (r) {
		dev_err(adev->dev, "(%d) failed to reserve kernel bo\n", r);
		goto error_free;
	}

	r = amdgpu_bo_pin(*bo_ptr, domain);
	if (r) {
		dev_err(adev->dev, "(%d) kernel bo pin failed\n", r);
		goto error_unreserve;
	}

	r = amdgpu_ttm_alloc_gart(&(*bo_ptr)->tbo);
	if (r) {
		dev_err(adev->dev, "%p bind failed\n", *bo_ptr);
		goto error_unpin;
	}

	if (gpu_addr)
		*gpu_addr = amdgpu_bo_gpu_offset(*bo_ptr);

	if (cpu_addr) {
		r = amdgpu_bo_kmap(*bo_ptr, cpu_addr);
		if (r) {
			dev_err(adev->dev, "(%d) kernel bo map failed\n", r);
			goto error_unpin;
		}
	}

	return 0;

error_unpin:
	amdgpu_bo_unpin(*bo_ptr);
error_unreserve:
	amdgpu_bo_unreserve(*bo_ptr);
error_free:
	if (free)
		amdgpu_bo_unref(bo_ptr);

	return r;
}
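/*
 * Illustrative sketch (not part of the driver): a caller that must
 * initialize the BO contents before publishing it can use the still
 * reserved BO returned by this function:
 *
 *	r = amdgpu_bo_create_reserved(adev, size, PAGE_SIZE, domain,
 *				      &bo, &gpu_addr, &cpu_ptr);
 *	if (!r) {
 *		memset(cpu_ptr, 0, size);
 *		amdgpu_bo_unreserve(bo);
 *	}
 */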
/**
 * amdgpu_bo_create_kernel - create BO for kernel use
 *
 * @adev: amdgpu device object
 * @size: size for the new BO
 * @align: alignment for the new BO
 * @domain: where to place it
 * @bo_ptr: used to initialize BOs in structures
 * @gpu_addr: GPU addr of the pinned BO
 * @cpu_addr: optional CPU address mapping
 *
 * Allocates and pins a BO for kernel internal use.
 *
 * Note: a new BO is only created if *bo_ptr points to NULL.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
			    unsigned long size, int align,
			    u32 domain, struct amdgpu_bo **bo_ptr,
			    u64 *gpu_addr, void **cpu_addr)
{
	int r;

	r = amdgpu_bo_create_reserved(adev, size, align, domain, bo_ptr,
				      gpu_addr, cpu_addr);
	if (r)
		return r;

	if (*bo_ptr)
		amdgpu_bo_unreserve(*bo_ptr);

	return 0;
}
/**
 * amdgpu_bo_create_kernel_at - create BO for kernel use at specific location
 *
 * @adev: amdgpu device object
 * @offset: offset of the BO
 * @size: size of the BO
 * @bo_ptr: used to initialize BOs in structures
 * @cpu_addr: optional CPU address mapping
 *
 * Creates a kernel BO at a specific offset in VRAM.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev,
			       uint64_t offset, uint64_t size,
			       struct amdgpu_bo **bo_ptr, void **cpu_addr)
{
	struct ttm_operation_ctx ctx = { false, false };
	unsigned int i;
	int r;

	offset &= PAGE_MASK;
	size = ALIGN(size, PAGE_SIZE);

	r = amdgpu_bo_create_reserved(adev, size, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM, bo_ptr, NULL,
				      cpu_addr);
	if (r)
		return r;

	if ((*bo_ptr) == NULL)
		return 0;

	/*
	 * Remove the original mem node and create a new one at the requested
	 * position.
	 */
	if (cpu_addr)
		amdgpu_bo_kunmap(*bo_ptr);

	ttm_resource_free(&(*bo_ptr)->tbo, &(*bo_ptr)->tbo.resource);

	for (i = 0; i < (*bo_ptr)->placement.num_placement; ++i) {
		(*bo_ptr)->placements[i].fpfn = offset >> PAGE_SHIFT;
		(*bo_ptr)->placements[i].lpfn = (offset + size) >> PAGE_SHIFT;
	}
	r = ttm_bo_mem_space(&(*bo_ptr)->tbo, &(*bo_ptr)->placement,
			     &(*bo_ptr)->tbo.resource, &ctx);
	if (r)
		goto error;

	if (cpu_addr) {
		r = amdgpu_bo_kmap(*bo_ptr, cpu_addr);
		if (r)
			goto error;
	}

	amdgpu_bo_unreserve(*bo_ptr);
	return 0;

error:
	amdgpu_bo_unreserve(*bo_ptr);
	amdgpu_bo_unref(bo_ptr);
	return r;
}
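/*
 * Illustrative sketch (not part of the driver): reserving a firmware
 * carve-out that must stay at a fixed VRAM offset; "offset" and "size" are
 * assumed to come from the platform:
 *
 *	r = amdgpu_bo_create_kernel_at(adev, offset, size, &bo, &cpu_ptr);
 */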
/**
 * amdgpu_bo_free_kernel - free BO for kernel use
 *
 * @bo: amdgpu BO to free
 * @gpu_addr: pointer to where the BO's GPU memory space address was stored
 * @cpu_addr: pointer to where the BO's CPU memory space address was stored
 *
 * Unmaps and unpins a BO for kernel internal use.
 */
void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
			   void **cpu_addr)
{
	if (*bo == NULL)
		return;

	WARN_ON(amdgpu_ttm_adev((*bo)->tbo.bdev)->in_suspend);

	if (likely(amdgpu_bo_reserve(*bo, true) == 0)) {
		if (cpu_addr)
			amdgpu_bo_kunmap(*bo);

		amdgpu_bo_unpin(*bo);
		amdgpu_bo_unreserve(*bo);
	}
	amdgpu_bo_unref(bo);
}
/* Validate that the BO size fits within the total memory of the requested domain */
static bool amdgpu_bo_validate_size(struct amdgpu_device *adev,
				    unsigned long size, u32 domain)
{
	struct ttm_resource_manager *man = NULL;

	/*
	 * If GTT is part of requested domains the check must succeed to
	 * allow fall back to GTT.
	 */
	if (domain & AMDGPU_GEM_DOMAIN_GTT)
		man = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT);
	else if (domain & AMDGPU_GEM_DOMAIN_VRAM)
		man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
	else
		return true;

	if (!man) {
		if (domain & AMDGPU_GEM_DOMAIN_GTT)
			WARN_ON_ONCE("GTT domain requested but GTT mem manager uninitialized");
		return false;
	}

	/* TODO add more domains checks, such as AMDGPU_GEM_DOMAIN_CPU, _DOMAIN_DOORBELL */
	if (size < man->size)
		return true;

	DRM_DEBUG("BO size %lu > total memory in domain: %llu\n", size, man->size);
	return false;
}
bool amdgpu_bo_support_uswc(u64 bo_flags)
{
#ifdef CONFIG_X86_32
	/* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit
	 * See https://bugs.freedesktop.org/show_bug.cgi?id=84627
	 */
	return false;
#elif defined(CONFIG_X86) && !defined(CONFIG_X86_PAT)
	/* Don't try to enable write-combining when it can't work, or things
	 * will get very slow.
	 * See https://bugs.freedesktop.org/show_bug.cgi?id=88758
	 */
#ifndef CONFIG_COMPILE_TEST
#warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \
	 thanks to write-combining
#endif

	if (bo_flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
		DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
			      "better performance thanks to write-combining\n");
	return false;
#else
	/* For architectures that don't support WC memory,
	 * mask out the WC flag from the BO
	 */
	if (!drm_arch_can_wc_memory())
		return false;

	return true;
#endif
}
/**
 * amdgpu_bo_create - create an &amdgpu_bo buffer object
 * @adev: amdgpu device object
 * @bp: parameters to be used for the buffer object
 * @bo_ptr: pointer to the buffer object pointer
 *
 * Creates an &amdgpu_bo buffer object.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_create(struct amdgpu_device *adev,
		     struct amdgpu_bo_param *bp,
		     struct amdgpu_bo **bo_ptr)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = (bp->type != ttm_bo_type_kernel),
		.no_wait_gpu = bp->no_wait_gpu,
		/* We opt to avoid OOM on system pages allocations */
		.gfp_retry_mayfail = true,
		.allow_res_evict = bp->type != ttm_bo_type_kernel,
		.resv = bp->resv
	};
	struct amdgpu_bo *bo;
	unsigned long page_align, size = bp->size;
	int r;

	/* Note that GDS/GWS/OA allocate 1 page per byte/resource. */
	if (bp->domain & (AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
		/* GWS and OA don't need any alignment. */
		page_align = bp->byte_align;
		size <<= PAGE_SHIFT;
	} else if (bp->domain & AMDGPU_GEM_DOMAIN_GDS) {
		/* Both size and alignment must be a multiple of 4. */
		page_align = ALIGN(bp->byte_align, 4);
		size = ALIGN(size, 4) << PAGE_SHIFT;
	} else {
		/* Memory should be aligned at least to a page size. */
		page_align = ALIGN(bp->byte_align, PAGE_SIZE) >> PAGE_SHIFT;
		size = ALIGN(size, PAGE_SIZE);
	}

	if (!amdgpu_bo_validate_size(adev, size, bp->domain))
		return -ENOMEM;

	BUG_ON(bp->bo_ptr_size < sizeof(struct amdgpu_bo));

	*bo_ptr = NULL;
	bo = kvzalloc(bp->bo_ptr_size, GFP_KERNEL);
	if (bo == NULL)
		return -ENOMEM;

	drm_gem_private_object_init(adev_to_drm(adev), &bo->tbo.base, size);
	bo->preferred_domains = bp->preferred_domain ? bp->preferred_domain :
		bp->domain;
	bo->allowed_domains = bo->preferred_domains;
	if (bp->type != ttm_bo_type_kernel &&
	    !(bp->flags & AMDGPU_GEM_CREATE_DISCARDABLE) &&
	    bo->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
		bo->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;

	bo->flags = bp->flags;

	if (adev->gmc.mem_partitions)
		/* For GPUs with spatial partitioning, bo->xcp_id=-1 means any partition */
		bo->xcp_id = bp->xcp_id_plus1 - 1;
	else
		/* For GPUs without spatial partitioning */
		bo->xcp_id = 0;

	if (!amdgpu_bo_support_uswc(bo->flags))
		bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;

	if (adev->ras_enabled)
		bo->flags |= AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE;

	bo->tbo.bdev = &adev->mman.bdev;
	if (bp->domain & (AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA |
			  AMDGPU_GEM_DOMAIN_GDS))
		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
	else
		amdgpu_bo_placement_from_domain(bo, bp->domain);
	if (bp->type == ttm_bo_type_kernel)
		bo->tbo.priority = 1;

	if (!bp->destroy)
		bp->destroy = &amdgpu_bo_destroy;

	r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, bp->type,
				 &bo->placement, page_align, &ctx, NULL,
				 bp->resv, bp->destroy);
	if (unlikely(r != 0))
		return r;

	if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
	    amdgpu_res_cpu_visible(adev, bo->tbo.resource))
		amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved,
					     ctx.bytes_moved);
	else
		amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved, 0);

	if (bp->flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
	    bo->tbo.resource->mem_type == TTM_PL_VRAM) {
		struct dma_fence *fence;

		r = amdgpu_fill_buffer(bo, 0, bo->tbo.base.resv, &fence, true);
		if (unlikely(r))
			goto fail_unreserve;

		dma_resv_add_fence(bo->tbo.base.resv, fence,
				   DMA_RESV_USAGE_KERNEL);
		dma_fence_put(fence);
	}
	if (!bp->resv)
		amdgpu_bo_unreserve(bo);
	*bo_ptr = bo;

	trace_amdgpu_bo_create(bo);

	/* Treat CPU_ACCESS_REQUIRED only as a hint if given by UMD */
	if (bp->type == ttm_bo_type_device)
		bo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;

	return 0;

fail_unreserve:
	if (!bp->resv)
		dma_resv_unlock(bo->tbo.base.resv);
	amdgpu_bo_unref(&bo);
	return r;
}
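/*
 * Illustrative sketch (not part of the driver): filling in an
 * &amdgpu_bo_param by hand, much as amdgpu_bo_create_reserved() above does:
 *
 *	struct amdgpu_bo_param bp;
 *
 *	memset(&bp, 0, sizeof(bp));
 *	bp.size = size;
 *	bp.byte_align = PAGE_SIZE;
 *	bp.domain = AMDGPU_GEM_DOMAIN_GTT;
 *	bp.type = ttm_bo_type_kernel;
 *	bp.bo_ptr_size = sizeof(struct amdgpu_bo);
 *	r = amdgpu_bo_create(adev, &bp, &bo);
 */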
/**
 * amdgpu_bo_create_user - create an &amdgpu_bo_user buffer object
 * @adev: amdgpu device object
 * @bp: parameters to be used for the buffer object
 * @ubo_ptr: pointer to the buffer object pointer
 *
 * Creates a BO to be used by a user application.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_create_user(struct amdgpu_device *adev,
			  struct amdgpu_bo_param *bp,
			  struct amdgpu_bo_user **ubo_ptr)
{
	struct amdgpu_bo *bo_ptr;
	int r;

	bp->bo_ptr_size = sizeof(struct amdgpu_bo_user);
	bp->destroy = &amdgpu_bo_user_destroy;
	r = amdgpu_bo_create(adev, bp, &bo_ptr);
	if (r)
		return r;

	*ubo_ptr = to_amdgpu_bo_user(bo_ptr);
	return 0;
}
/**
 * amdgpu_bo_create_vm - create an &amdgpu_bo_vm buffer object
 * @adev: amdgpu device object
 * @bp: parameters to be used for the buffer object
 * @vmbo_ptr: pointer to the buffer object pointer
 *
 * Creates a BO to be used by GPUVM.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_create_vm(struct amdgpu_device *adev,
			struct amdgpu_bo_param *bp,
			struct amdgpu_bo_vm **vmbo_ptr)
{
	struct amdgpu_bo *bo_ptr;
	int r;

	/* bo_ptr_size will be determined by the caller and it depends on
	 * num of amdgpu_vm_pt entries.
	 */
	BUG_ON(bp->bo_ptr_size < sizeof(struct amdgpu_bo_vm));
	r = amdgpu_bo_create(adev, bp, &bo_ptr);
	if (r)
		return r;

	*vmbo_ptr = to_amdgpu_bo_vm(bo_ptr);
	return 0;
}
/**
 * amdgpu_bo_add_to_shadow_list - add a BO to the shadow list
 *
 * @vmbo: BO that will be inserted into the shadow list
 *
 * Inserts a BO into the shadow list.
 */
void amdgpu_bo_add_to_shadow_list(struct amdgpu_bo_vm *vmbo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(vmbo->bo.tbo.bdev);

	mutex_lock(&adev->shadow_list_lock);
	list_add_tail(&vmbo->shadow_list, &adev->shadow_list);
	vmbo->shadow->parent = amdgpu_bo_ref(&vmbo->bo);
	vmbo->shadow->tbo.destroy = &amdgpu_bo_vm_destroy;
	mutex_unlock(&adev->shadow_list_lock);
}
/**
 * amdgpu_bo_restore_shadow - restore an &amdgpu_bo shadow
 *
 * @shadow: &amdgpu_bo shadow to be restored
 * @fence: dma_fence associated with the operation
 *
 * Copies a buffer object's shadow content back to the object.
 * This is used for recovering a buffer from its shadow in case of a gpu
 * reset where vram context may be lost.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_restore_shadow(struct amdgpu_bo *shadow, struct dma_fence **fence)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(shadow->tbo.bdev);
	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
	uint64_t shadow_addr, parent_addr;

	shadow_addr = amdgpu_bo_gpu_offset(shadow);
	parent_addr = amdgpu_bo_gpu_offset(shadow->parent);

	return amdgpu_copy_buffer(ring, shadow_addr, parent_addr,
				  amdgpu_bo_size(shadow), NULL, fence,
				  true, false, 0);
}
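/*
 * Illustrative sketch (not part of the driver): a VRAM recovery path would
 * restore a shadow and then wait for the copy to complete:
 *
 *	struct dma_fence *fence = NULL;
 *
 *	r = amdgpu_bo_restore_shadow(shadow, &fence);
 *	if (!r && fence)
 *		dma_fence_wait(fence, false);
 *	dma_fence_put(fence);
 */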
/**
 * amdgpu_bo_kmap - map an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be mapped
 * @ptr: kernel virtual address to be returned
 *
 * Calls ttm_bo_kmap() to set up the kernel virtual mapping; calls
 * amdgpu_bo_kptr() to get the kernel virtual address.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
{
	void *kptr;
	long r;

	if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
		return -EPERM;

	r = dma_resv_wait_timeout(bo->tbo.base.resv, DMA_RESV_USAGE_KERNEL,
				  false, MAX_SCHEDULE_TIMEOUT);
	if (r < 0)
		return r;

	kptr = amdgpu_bo_kptr(bo);
	if (kptr) {
		if (ptr)
			*ptr = kptr;
		return 0;
	}

	r = ttm_bo_kmap(&bo->tbo, 0, PFN_UP(bo->tbo.base.size), &bo->kmap);
	if (r)
		return r;

	if (ptr)
		*ptr = amdgpu_bo_kptr(bo);

	return 0;
}
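/*
 * Illustrative sketch (not part of the driver): mapping a reserved BO for
 * CPU access; "data" and "len" are hypothetical. The BO must not have been
 * created with AMDGPU_GEM_CREATE_NO_CPU_ACCESS.
 *
 *	void *ptr;
 *
 *	r = amdgpu_bo_kmap(bo, &ptr);
 *	if (!r) {
 *		memcpy(ptr, data, len);
 *		amdgpu_bo_kunmap(bo);
 *	}
 */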
/**
 * amdgpu_bo_kptr - returns a kernel virtual address of the buffer object
 * @bo: &amdgpu_bo buffer object
 *
 * Calls ttm_kmap_obj_virtual() to get the kernel virtual address.
 *
 * Returns:
 * the virtual address of a buffer object area.
 */
void *amdgpu_bo_kptr(struct amdgpu_bo *bo)
{
	bool is_iomem;

	return ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
}

/**
 * amdgpu_bo_kunmap - unmap an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be unmapped
 *
 * Unmaps a kernel map set up by amdgpu_bo_kmap().
 */
void amdgpu_bo_kunmap(struct amdgpu_bo *bo)
{
	if (bo->kmap.bo)
		ttm_bo_kunmap(&bo->kmap);
}
/**
 * amdgpu_bo_ref - reference an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object
 *
 * References the contained &ttm_buffer_object.
 *
 * Returns:
 * a refcounted pointer to the &amdgpu_bo buffer object.
 */
struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo)
{
	if (bo == NULL)
		return NULL;

	ttm_bo_get(&bo->tbo);
	return bo;
}

/**
 * amdgpu_bo_unref - unreference an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object
 *
 * Unreferences the contained &ttm_buffer_object and clears the pointer.
 */
void amdgpu_bo_unref(struct amdgpu_bo **bo)
{
	struct ttm_buffer_object *tbo;

	if ((*bo) == NULL)
		return;

	tbo = &((*bo)->tbo);
	ttm_bo_put(tbo);
	*bo = NULL;
}
/**
 * amdgpu_bo_pin_restricted - pin an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be pinned
 * @domain: domain to be pinned to
 * @min_offset: the start of requested address range
 * @max_offset: the end of requested address range
 *
 * Pins the buffer object according to requested domain and address range. If
 * the memory is unbound gart memory, binds the pages into gart table. Adjusts
 * pin_count and pin_size accordingly.
 *
 * Pinning means to lock pages in memory along with keeping them at a fixed
 * offset. It is required when a buffer can not be moved, for example, when
 * a display buffer is being scanned out.
 *
 * Compared with amdgpu_bo_pin(), this function gives more flexibility on
 * where to pin a buffer if there are specific restrictions on where a buffer
 * must be located.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
			     u64 min_offset, u64 max_offset)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct ttm_operation_ctx ctx = { false, false };
	int r, i;

	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
		return -EPERM;

	if (WARN_ON_ONCE(min_offset > max_offset))
		return -EINVAL;

	/* Check domain to be pinned to against preferred domains */
	if (bo->preferred_domains & domain)
		domain = bo->preferred_domains & domain;

	/* A shared bo cannot be migrated to VRAM */
	if (bo->tbo.base.import_attach) {
		if (domain & AMDGPU_GEM_DOMAIN_GTT)
			domain = AMDGPU_GEM_DOMAIN_GTT;
		else
			return -EINVAL;
	}

	if (bo->tbo.pin_count) {
		uint32_t mem_type = bo->tbo.resource->mem_type;
		uint32_t mem_flags = bo->tbo.resource->placement;

		if (!(domain & amdgpu_mem_type_to_domain(mem_type)))
			return -EINVAL;

		if ((mem_type == TTM_PL_VRAM) &&
		    (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS) &&
		    !(mem_flags & TTM_PL_FLAG_CONTIGUOUS))
			return -EINVAL;

		ttm_bo_pin(&bo->tbo);

		if (max_offset != 0) {
			u64 domain_start = amdgpu_ttm_domain_start(adev,
								   mem_type);
			WARN_ON_ONCE(max_offset <
				     (amdgpu_bo_gpu_offset(bo) - domain_start));
		}

		return 0;
	}

	/* This assumes only APU display buffers are pinned with (VRAM|GTT).
	 * See function amdgpu_display_supported_domains()
	 */
	domain = amdgpu_bo_get_preferred_domain(adev, domain);

	if (bo->tbo.base.import_attach)
		dma_buf_pin(bo->tbo.base.import_attach);

	/* force to pin into visible video ram */
	if (!(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS))
		bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
	amdgpu_bo_placement_from_domain(bo, domain);
	for (i = 0; i < bo->placement.num_placement; i++) {
		unsigned int fpfn, lpfn;

		fpfn = min_offset >> PAGE_SHIFT;
		lpfn = max_offset >> PAGE_SHIFT;

		if (fpfn > bo->placements[i].fpfn)
			bo->placements[i].fpfn = fpfn;
		if (!bo->placements[i].lpfn ||
		    (lpfn && lpfn < bo->placements[i].lpfn))
			bo->placements[i].lpfn = lpfn;
	}

	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (unlikely(r)) {
		dev_err(adev->dev, "%p pin failed\n", bo);
		goto error;
	}

	ttm_bo_pin(&bo->tbo);

	domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type);
	if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
		atomic64_add(amdgpu_bo_size(bo), &adev->vram_pin_size);
		atomic64_add(amdgpu_vram_mgr_bo_visible_size(bo),
			     &adev->visible_pin_size);
	} else if (domain == AMDGPU_GEM_DOMAIN_GTT) {
		atomic64_add(amdgpu_bo_size(bo), &adev->gart_pin_size);
	}

error:
	return r;
}
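/*
 * Illustrative sketch (not part of the driver): pinning a scanout buffer
 * into the CPU-visible part of VRAM by restricting the address range:
 *
 *	r = amdgpu_bo_pin_restricted(bo, AMDGPU_GEM_DOMAIN_VRAM,
 *				     0, adev->gmc.visible_vram_size);
 */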
/**
 * amdgpu_bo_pin - pin an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be pinned
 * @domain: domain to be pinned to
 *
 * A simple wrapper to amdgpu_bo_pin_restricted().
 * Provides a simpler API for buffers that do not have any strict restrictions
 * on where a buffer must be located.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain)
{
	bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
	return amdgpu_bo_pin_restricted(bo, domain, 0, 0);
}
/**
 * amdgpu_bo_unpin - unpin an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be unpinned
 *
 * Decreases the pin_count, and clears the flags if pin_count reaches 0.
 * Changes placement and pin size accordingly.
 */
void amdgpu_bo_unpin(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);

	ttm_bo_unpin(&bo->tbo);
	if (bo->tbo.pin_count)
		return;

	if (bo->tbo.base.import_attach)
		dma_buf_unpin(bo->tbo.base.import_attach);

	if (bo->tbo.resource->mem_type == TTM_PL_VRAM) {
		atomic64_sub(amdgpu_bo_size(bo), &adev->vram_pin_size);
		atomic64_sub(amdgpu_vram_mgr_bo_visible_size(bo),
			     &adev->visible_pin_size);
	} else if (bo->tbo.resource->mem_type == TTM_PL_TT) {
		atomic64_sub(amdgpu_bo_size(bo), &adev->gart_pin_size);
	}
}
static const char * const amdgpu_vram_names[] = {
	"UNKNOWN",
	"GDDR1",
	"DDR2",
	"GDDR3",
	"GDDR4",
	"GDDR5",
	"HBM",
	"DDR3",
	"DDR4",
	"GDDR6",
	"DDR5",
	"LPDDR4",
	"LPDDR5"
};
/**
 * amdgpu_bo_init - initialize memory manager
 * @adev: amdgpu device object
 *
 * Calls amdgpu_ttm_init() to initialize amdgpu memory manager.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_init(struct amdgpu_device *adev)
{
	/* On A+A platform, VRAM can be mapped as WB */
	if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu) {
		/* reserve PAT memory space to WC for VRAM */
		int r = arch_io_reserve_memtype_wc(adev->gmc.aper_base,
						   adev->gmc.aper_size);

		if (r) {
			DRM_ERROR("Unable to set WC memtype for the aperture base\n");
			return r;
		}

		/* Add an MTRR for the VRAM */
		adev->gmc.vram_mtrr = arch_phys_wc_add(adev->gmc.aper_base,
						       adev->gmc.aper_size);
	}

	DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
		 adev->gmc.mc_vram_size >> 20,
		 (unsigned long long)adev->gmc.aper_size >> 20);
	DRM_INFO("RAM width %dbits %s\n",
		 adev->gmc.vram_width, amdgpu_vram_names[adev->gmc.vram_type]);
	return amdgpu_ttm_init(adev);
}
/**
 * amdgpu_bo_fini - tear down memory manager
 * @adev: amdgpu device object
 *
 * Reverses amdgpu_bo_init() to tear down memory manager.
 */
void amdgpu_bo_fini(struct amdgpu_device *adev)
{
	int idx;

	amdgpu_ttm_fini(adev);

	if (drm_dev_enter(adev_to_drm(adev), &idx)) {
		if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu) {
			arch_phys_wc_del(adev->gmc.vram_mtrr);
			arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
		}
		drm_dev_exit(idx);
	}
}
/**
 * amdgpu_bo_set_tiling_flags - set tiling flags
 * @bo: &amdgpu_bo buffer object
 * @tiling_flags: new flags
 *
 * Sets the buffer object's tiling flags. Used by GEM ioctl or
 * kernel driver to set the tiling flags on a buffer.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct amdgpu_bo_user *ubo;

	BUG_ON(bo->tbo.type == ttm_bo_type_kernel);
	if (adev->family <= AMDGPU_FAMILY_CZ &&
	    AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT) > 6)
		return -EINVAL;

	ubo = to_amdgpu_bo_user(bo);
	ubo->tiling_flags = tiling_flags;
	return 0;
}
/**
 * amdgpu_bo_get_tiling_flags - get tiling flags
 * @bo: &amdgpu_bo buffer object
 * @tiling_flags: returned flags
 *
 * Gets buffer object's tiling flags. Used by GEM ioctl or kernel driver to
 * query the tiling flags on a buffer.
 */
void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags)
{
	struct amdgpu_bo_user *ubo;

	BUG_ON(bo->tbo.type == ttm_bo_type_kernel);
	dma_resv_assert_held(bo->tbo.base.resv);
	ubo = to_amdgpu_bo_user(bo);

	if (tiling_flags)
		*tiling_flags = ubo->tiling_flags;
}
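/*
 * Illustrative sketch (not part of the driver): round-tripping tiling flags
 * on a reserved user BO; the swizzle value is a made-up example:
 *
 *	u64 tiling = AMDGPU_TILING_SET(SWIZZLE_MODE, 0x1);
 *
 *	r = amdgpu_bo_set_tiling_flags(bo, tiling);
 *	...
 *	amdgpu_bo_get_tiling_flags(bo, &tiling);
 */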
/**
 * amdgpu_bo_set_metadata - set metadata
 * @bo: &amdgpu_bo buffer object
 * @metadata: new metadata
 * @metadata_size: size of the new metadata
 * @flags: flags of the new metadata
 *
 * Sets buffer object's metadata, its size and flags.
 * Used via GEM ioctl.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_set_metadata(struct amdgpu_bo *bo, void *metadata,
			   u32 metadata_size, uint64_t flags)
{
	struct amdgpu_bo_user *ubo;
	void *buffer;

	BUG_ON(bo->tbo.type == ttm_bo_type_kernel);
	ubo = to_amdgpu_bo_user(bo);
	if (!metadata_size) {
		if (ubo->metadata_size) {
			kfree(ubo->metadata);
			ubo->metadata = NULL;
			ubo->metadata_size = 0;
		}
		return 0;
	}

	if (metadata == NULL)
		return -EINVAL;

	buffer = kmemdup(metadata, metadata_size, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	kfree(ubo->metadata);
	ubo->metadata_flags = flags;
	ubo->metadata = buffer;
	ubo->metadata_size = metadata_size;

	return 0;
}
/**
 * amdgpu_bo_get_metadata - get metadata
 * @bo: &amdgpu_bo buffer object
 * @buffer: returned metadata
 * @buffer_size: size of the buffer
 * @metadata_size: size of the returned metadata
 * @flags: flags of the returned metadata
 *
 * Gets buffer object's metadata, its size and flags. buffer_size shall not be
 * less than metadata_size.
 * Used via GEM ioctl.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
			   size_t buffer_size, uint32_t *metadata_size,
			   uint64_t *flags)
{
	struct amdgpu_bo_user *ubo;

	if (!buffer && !metadata_size)
		return -EINVAL;

	BUG_ON(bo->tbo.type == ttm_bo_type_kernel);
	ubo = to_amdgpu_bo_user(bo);
	if (metadata_size)
		*metadata_size = ubo->metadata_size;

	if (buffer) {
		if (buffer_size < ubo->metadata_size)
			return -EINVAL;

		if (ubo->metadata_size)
			memcpy(buffer, ubo->metadata, ubo->metadata_size);
	}

	if (flags)
		*flags = ubo->metadata_flags;

	return 0;
}
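/*
 * Illustrative sketch (not part of the driver): storing an opaque UMD blob
 * on a user BO; "blob" is a hypothetical byte array:
 *
 *	r = amdgpu_bo_set_metadata(bo, blob, sizeof(blob), 0);
 */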
/**
 * amdgpu_bo_move_notify - notification about a memory move
 * @bo: pointer to a buffer object
 * @evict: if this move is evicting the buffer from the graphics address space
 *
 * Marks the corresponding &amdgpu_bo buffer object as invalid, also performs
 * bookkeeping.
 * TTM driver callback which is called when ttm moves a buffer.
 */
void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, bool evict)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct amdgpu_bo *abo;

	if (!amdgpu_bo_is_amdgpu_bo(bo))
		return;

	abo = ttm_to_amdgpu_bo(bo);
	amdgpu_vm_bo_invalidate(adev, abo, evict);

	amdgpu_bo_kunmap(abo);

	if (abo->tbo.base.dma_buf && !abo->tbo.base.import_attach &&
	    bo->resource->mem_type != TTM_PL_SYSTEM)
		dma_buf_move_notify(abo->tbo.base.dma_buf);

	/* remember the eviction */
	if (evict)
		atomic64_inc(&adev->num_evictions);
}
void amdgpu_bo_get_memory(struct amdgpu_bo *bo,
			  struct amdgpu_mem_stats *stats)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct ttm_resource *res = bo->tbo.resource;
	uint64_t size = amdgpu_bo_size(bo);
	struct drm_gem_object *obj;
	unsigned int domain;
	bool shared;

	/* Abort if the BO doesn't currently have a backing store */
	if (!res)
		return;

	obj = &bo->tbo.base;
	shared = drm_gem_object_is_shared_for_memory_stats(obj);

	domain = amdgpu_mem_type_to_domain(res->mem_type);
	switch (domain) {
	case AMDGPU_GEM_DOMAIN_VRAM:
		stats->vram += size;
		if (amdgpu_res_cpu_visible(adev, bo->tbo.resource))
			stats->visible_vram += size;
		if (shared)
			stats->vram_shared += size;
		break;
	case AMDGPU_GEM_DOMAIN_GTT:
		stats->gtt += size;
		if (shared)
			stats->gtt_shared += size;
		break;
	case AMDGPU_GEM_DOMAIN_CPU:
	default:
		stats->cpu += size;
		if (shared)
			stats->cpu_shared += size;
		break;
	}

	if (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) {
		stats->requested_vram += size;
		if (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
			stats->requested_visible_vram += size;

		if (domain != AMDGPU_GEM_DOMAIN_VRAM) {
			stats->evicted_vram += size;
			if (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
				stats->evicted_visible_vram += size;
		}
	} else if (bo->preferred_domains & AMDGPU_GEM_DOMAIN_GTT) {
		stats->requested_gtt += size;
	}
}
/**
 * amdgpu_bo_release_notify - notification about a BO being released
 * @bo: pointer to a buffer object
 *
 * Wipes VRAM buffers whose contents should not be leaked before the
 * memory is released.
 */
void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct dma_fence *fence = NULL;
	struct amdgpu_bo *abo;
	int r;

	if (!amdgpu_bo_is_amdgpu_bo(bo))
		return;

	abo = ttm_to_amdgpu_bo(bo);

	WARN_ON(abo->vm_bo);

	if (abo->kfd_bo)
		amdgpu_amdkfd_release_notify(abo);

	/* We only remove the fence if the resv has individualized. */
	WARN_ON_ONCE(bo->type == ttm_bo_type_kernel
		     && bo->base.resv != &bo->base._resv);
	if (bo->base.resv == &bo->base._resv)
		amdgpu_amdkfd_remove_fence_on_pt_pd_bos(abo);

	if (!bo->resource || bo->resource->mem_type != TTM_PL_VRAM ||
	    !(abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE) ||
	    adev->in_suspend || drm_dev_is_unplugged(adev_to_drm(adev)))
		return;

	if (WARN_ON_ONCE(!dma_resv_trylock(bo->base.resv)))
		return;

	r = amdgpu_fill_buffer(abo, AMDGPU_POISON, bo->base.resv, &fence, true);
	if (!WARN_ON(r)) {
		amdgpu_bo_fence(abo, fence, false);
		dma_fence_put(fence);
	}

	dma_resv_unlock(bo->base.resv);
}
/**
 * amdgpu_bo_fault_reserve_notify - notification about a memory fault
 * @bo: pointer to a buffer object
 *
 * Notifies the driver we are taking a fault on this BO and have reserved it,
 * also performs bookkeeping.
 * TTM driver callback for dealing with vm faults.
 *
 * Returns:
 * 0 on success or a VM_FAULT_* code on failure.
 */
vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct ttm_operation_ctx ctx = { false, false };
	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
	int r;

	/* Remember that this BO was accessed by the CPU */
	abo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;

	if (amdgpu_res_cpu_visible(adev, bo->resource))
		return 0;

	/* Can't move a pinned BO to visible VRAM */
	if (abo->tbo.pin_count > 0)
		return VM_FAULT_SIGBUS;

	/* hurrah the memory is not visible ! */
	atomic64_inc(&adev->num_vram_cpu_page_faults);
	amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
					AMDGPU_GEM_DOMAIN_GTT);

	/* Avoid costly evictions; only set GTT as a busy placement */
	abo->placements[0].flags |= TTM_PL_FLAG_DESIRED;

	r = ttm_bo_validate(bo, &abo->placement, &ctx);
	if (unlikely(r == -EBUSY || r == -ERESTARTSYS))
		return VM_FAULT_NOPAGE;
	else if (unlikely(r))
		return VM_FAULT_SIGBUS;

	/* this should never happen */
	if (bo->resource->mem_type == TTM_PL_VRAM &&
	    !amdgpu_res_cpu_visible(adev, bo->resource))
		return VM_FAULT_SIGBUS;

	ttm_bo_move_to_lru_tail_unlocked(bo);
	return 0;
}
/**
 * amdgpu_bo_fence - add fence to buffer object
 *
 * @bo: buffer object in question
 * @fence: fence to add
 * @shared: true if fence should be added shared
 *
 */
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
		     bool shared)
{
	struct dma_resv *resv = bo->tbo.base.resv;
	int r;

	r = dma_resv_reserve_fences(resv, 1);
	if (r) {
		/* As last resort on OOM we block for the fence */
		dma_fence_wait(fence, false);
		return;
	}

	dma_resv_add_fence(resv, fence, shared ? DMA_RESV_USAGE_READ :
			   DMA_RESV_USAGE_WRITE);
}
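/*
 * Illustrative sketch (not part of the driver), mirroring the pattern used
 * in amdgpu_bo_release_notify() above: attach a fill operation's fence as a
 * write fence so later users wait for the clear to finish.
 *
 *	r = amdgpu_fill_buffer(abo, 0, bo->tbo.base.resv, &fence, true);
 *	if (!r) {
 *		amdgpu_bo_fence(abo, fence, false);
 *		dma_fence_put(fence);
 *	}
 */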
/**
 * amdgpu_bo_sync_wait_resv - Wait for BO reservation fences
 *
 * @adev: amdgpu device pointer
 * @resv: reservation object to sync to
 * @sync_mode: synchronization mode
 * @owner: fence owner
 * @intr: Whether the wait is interruptible
 *
 * Extracts the fences from the reservation object and waits for them to
 * finish.
 *
 * Returns:
 * 0 on success, errno otherwise.
 */
int amdgpu_bo_sync_wait_resv(struct amdgpu_device *adev, struct dma_resv *resv,
			     enum amdgpu_sync_mode sync_mode, void *owner,
			     bool intr)
{
	struct amdgpu_sync sync;
	int r;

	amdgpu_sync_create(&sync);
	amdgpu_sync_resv(adev, &sync, resv, sync_mode, owner);
	r = amdgpu_sync_wait(&sync, intr);
	amdgpu_sync_free(&sync);

	return r;
}
/**
 * amdgpu_bo_sync_wait - Wrapper for amdgpu_bo_sync_wait_resv
 * @bo: buffer object to wait for
 * @owner: fence owner
 * @intr: Whether the wait is interruptible
 *
 * Wrapper to wait for fences in a BO.
 *
 * Returns:
 * 0 on success, errno otherwise.
 */
int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);

	return amdgpu_bo_sync_wait_resv(adev, bo->tbo.base.resv,
					AMDGPU_SYNC_NE_OWNER, owner, intr);
}
/**
 * amdgpu_bo_gpu_offset - return GPU offset of bo
 * @bo: amdgpu object for which we query the offset
 *
 * Note: object should either be pinned or reserved when calling this
 * function, it might be useful to add a check for this for debugging.
 *
 * Returns:
 * current GPU offset of the object.
 */
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
{
	WARN_ON_ONCE(bo->tbo.resource->mem_type == TTM_PL_SYSTEM);
	WARN_ON_ONCE(!dma_resv_is_locked(bo->tbo.base.resv) &&
		     !bo->tbo.pin_count && bo->tbo.type != ttm_bo_type_kernel);
	WARN_ON_ONCE(bo->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET);
	WARN_ON_ONCE(bo->tbo.resource->mem_type == TTM_PL_VRAM &&
		     !(bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS));

	return amdgpu_bo_gpu_offset_no_check(bo);
}
/**
 * amdgpu_bo_gpu_offset_no_check - return GPU offset of bo
 * @bo: amdgpu object for which we query the offset
 *
 * Returns:
 * current GPU offset of the object without raising warnings.
 */
u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	uint64_t offset = AMDGPU_BO_INVALID_OFFSET;

	if (bo->tbo.resource->mem_type == TTM_PL_TT)
		offset = amdgpu_gmc_agp_addr(&bo->tbo);

	if (offset == AMDGPU_BO_INVALID_OFFSET)
		offset = (bo->tbo.resource->start << PAGE_SHIFT) +
			amdgpu_ttm_domain_start(adev, bo->tbo.resource->mem_type);

	return amdgpu_gmc_sign_extend(offset);
}
/**
 * amdgpu_bo_get_preferred_domain - get preferred domain
 * @adev: amdgpu device object
 * @domain: allowed :ref:`memory domains <amdgpu_memory_domains>`
 *
 * Returns:
 * Which of the allowed domains is preferred for allocating the BO.
 */
uint32_t amdgpu_bo_get_preferred_domain(struct amdgpu_device *adev,
					uint32_t domain)
{
	if ((domain == (AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT)) &&
	    ((adev->asic_type == CHIP_CARRIZO) || (adev->asic_type == CHIP_STONEY))) {
		domain = AMDGPU_GEM_DOMAIN_VRAM;
		if (adev->gmc.real_vram_size <= AMDGPU_SG_THRESHOLD)
			domain = AMDGPU_GEM_DOMAIN_GTT;
	}
	return domain;
}
#if defined(CONFIG_DEBUG_FS)
#define amdgpu_bo_print_flag(m, bo, flag)			\
	do {							\
		if (bo->flags & (AMDGPU_GEM_CREATE_ ## flag)) {	\
			seq_printf((m), " " #flag);		\
		}						\
	} while (0)
/**
 * amdgpu_bo_print_info - print BO info in debugfs file
 *
 * @id: Index or Id of the BO
 * @bo: Requested BO for printing info
 * @m: debugfs file
 *
 * Print BO information in debugfs file
 *
 * Returns:
 * Size of the BO in bytes.
 */
u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct dma_buf_attachment *attachment;
	struct dma_buf *dma_buf;
	const char *placement;
	unsigned int pin_count;
	u64 size;

	if (dma_resv_trylock(bo->tbo.base.resv)) {
		unsigned int domain;

		domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type);
		switch (domain) {
		case AMDGPU_GEM_DOMAIN_VRAM:
			if (amdgpu_res_cpu_visible(adev, bo->tbo.resource))
				placement = "VRAM VISIBLE";
			else
				placement = "VRAM";
			break;
		case AMDGPU_GEM_DOMAIN_GTT:
			placement = "GTT";
			break;
		case AMDGPU_GEM_DOMAIN_CPU:
		default:
			placement = "CPU";
			break;
		}
		dma_resv_unlock(bo->tbo.base.resv);
	} else {
		placement = "UNKNOWN";
	}

	size = amdgpu_bo_size(bo);
	seq_printf(m, "\t\t0x%08x: %12lld byte %s",
		   id, size, placement);

	pin_count = READ_ONCE(bo->tbo.pin_count);
	if (pin_count)
		seq_printf(m, " pin count %d", pin_count);

	dma_buf = READ_ONCE(bo->tbo.base.dma_buf);
	attachment = READ_ONCE(bo->tbo.base.import_attach);

	if (attachment)
		seq_printf(m, " imported from ino:%lu", file_inode(dma_buf->file)->i_ino);
	else if (dma_buf)
		seq_printf(m, " exported as ino:%lu", file_inode(dma_buf->file)->i_ino);

	amdgpu_bo_print_flag(m, bo, CPU_ACCESS_REQUIRED);
	amdgpu_bo_print_flag(m, bo, NO_CPU_ACCESS);
	amdgpu_bo_print_flag(m, bo, CPU_GTT_USWC);
	amdgpu_bo_print_flag(m, bo, VRAM_CLEARED);
	amdgpu_bo_print_flag(m, bo, VRAM_CONTIGUOUS);
	amdgpu_bo_print_flag(m, bo, VM_ALWAYS_VALID);
	amdgpu_bo_print_flag(m, bo, EXPLICIT_SYNC);

	seq_puts(m, "\n");

	return size;
}
#endif