/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */
#ifndef __AMDGPU_OBJECT_H__
#define __AMDGPU_OBJECT_H__

#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_res_cursor.h"

#ifdef CONFIG_MMU_NOTIFIER
#include <linux/mmu_notifier.h>
#endif

#define AMDGPU_BO_INVALID_OFFSET	LONG_MAX
#define AMDGPU_BO_MAX_PLACEMENTS	3

/* BO flag to indicate a KFD userptr BO */
#define AMDGPU_AMDKFD_CREATE_USERPTR_BO	(1ULL << 63)

#define to_amdgpu_bo_user(abo) container_of((abo), struct amdgpu_bo_user, bo)
#define to_amdgpu_bo_vm(abo) container_of((abo), struct amdgpu_bo_vm, bo)

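/*
 * Illustrative sketch (not part of the original header): these helpers
 * recover the containing user/VM structure from an embedded struct amdgpu_bo,
 * e.g. when only the base BO pointer "abo" (a placeholder name) is at hand.
 *
 *	struct amdgpu_bo_user *ubo = to_amdgpu_bo_user(abo);
 *	struct amdgpu_bo_vm *vmbo = to_amdgpu_bo_vm(abo);
 */
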
struct amdgpu_bo_param {
	unsigned long			size;
	int				byte_align;
	u32				bo_ptr_size;
	u32				domain;
	u32				preferred_domain;
	u64				flags;
	enum ttm_bo_type		type;
	bool				no_wait_gpu;
	struct dma_resv			*resv;
	void				(*destroy)(struct ttm_buffer_object *bo);
	/* xcp partition number plus 1, 0 means any partition */
	int8_t				xcp_id_plus1;
};

/* bo virtual addresses in a vm */
struct amdgpu_bo_va_mapping {
	struct amdgpu_bo_va		*bo_va;
	struct list_head		list;
	struct rb_node			rb;
	uint64_t			start;
	uint64_t			last;
	uint64_t			__subtree_last;
	uint64_t			offset;
	uint64_t			flags;
};

/* User space allocated BO in a VM */
struct amdgpu_bo_va {
	struct amdgpu_vm_bo_base	base;

	/* protected by bo being reserved */
	unsigned			ref_count;

	/* all other members protected by the VM PD being reserved */
	struct dma_fence		*last_pt_update;

	/* mappings for this bo_va */
	struct list_head		invalids;
	struct list_head		valids;

	/* If the mappings are cleared or filled */
	bool				cleared;

	bool				is_xgmi;

	/*
	 * protected by vm reservation lock
	 * if non-zero, cannot unmap from GPU because user queues may still access it
	 */
	unsigned int			queue_refcount;
};

struct amdgpu_bo {
	/* Protected by tbo.reserved */
	u32				preferred_domains;
	u32				allowed_domains;
	struct ttm_place		placements[AMDGPU_BO_MAX_PLACEMENTS];
	struct ttm_placement		placement;
	struct ttm_buffer_object	tbo;
	struct ttm_bo_kmap_obj		kmap;
	u64				flags;
	/* per VM structure for page tables and with virtual addresses */
	struct amdgpu_vm_bo_base	*vm_bo;
	/* Constant after initialization */
	struct amdgpu_bo		*parent;

#ifdef CONFIG_MMU_NOTIFIER
	struct mmu_interval_notifier	notifier;
#endif
	struct kgd_mem			*kfd_bo;

	/*
	 * For GPUs with spatial partitioning, xcp partition number, -1 means
	 * any partition. For other ASICs without spatial partition, always 0
	 * for memory accounting.
	 */
	int8_t				xcp_id;
};

struct amdgpu_bo_user {
	struct amdgpu_bo		bo;
	u64				tiling_flags;
	u64				metadata_flags;
	void				*metadata;
	u32				metadata_size;
};

struct amdgpu_bo_vm {
	struct amdgpu_bo		bo;
	struct amdgpu_bo		*shadow;
	struct list_head		shadow_list;
	struct amdgpu_vm_bo_base	entries[];
};

struct amdgpu_mem_stats {
	/* current VRAM usage, includes visible VRAM */
	uint64_t vram;
	/* current shared VRAM usage, includes visible VRAM */
	uint64_t vram_shared;
	/* current visible VRAM usage */
	uint64_t visible_vram;
	/* current GTT usage */
	uint64_t gtt;
	/* current shared GTT usage */
	uint64_t gtt_shared;
	/* current system memory usage */
	uint64_t cpu;
	/* current shared system memory usage */
	uint64_t cpu_shared;
	/* sum of evicted buffers, includes visible VRAM */
	uint64_t evicted_vram;
	/* sum of evicted buffers due to CPU access */
	uint64_t evicted_visible_vram;
	/* how much userspace asked for, includes vis.VRAM */
	uint64_t requested_vram;
	/* how much userspace asked for */
	uint64_t requested_visible_vram;
	/* how much userspace asked for */
	uint64_t requested_gtt;
};

static inline struct amdgpu_bo *ttm_to_amdgpu_bo(struct ttm_buffer_object *tbo)
{
	return container_of(tbo, struct amdgpu_bo, tbo);
}

/**
 * amdgpu_mem_type_to_domain - return domain corresponding to mem_type
 * @mem_type:	ttm memory type
 *
 * Returns corresponding domain of the ttm mem_type
 */
static inline unsigned amdgpu_mem_type_to_domain(u32 mem_type)
{
	switch (mem_type) {
	case TTM_PL_VRAM:
		return AMDGPU_GEM_DOMAIN_VRAM;
	case TTM_PL_TT:
		return AMDGPU_GEM_DOMAIN_GTT;
	case TTM_PL_SYSTEM:
		return AMDGPU_GEM_DOMAIN_CPU;
	case AMDGPU_PL_GDS:
		return AMDGPU_GEM_DOMAIN_GDS;
	case AMDGPU_PL_GWS:
		return AMDGPU_GEM_DOMAIN_GWS;
	case AMDGPU_PL_OA:
		return AMDGPU_GEM_DOMAIN_OA;
	case AMDGPU_PL_DOORBELL:
		return AMDGPU_GEM_DOMAIN_DOORBELL;
	default:
		break;
	}

	return 0;
}

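/*
 * Illustrative sketch (not from the driver): translating a BO's current TTM
 * placement back into a GEM domain.  tbo.resource may be NULL for BOs that
 * have no backing store yet, so callers typically check it first.
 *
 *	if (bo->tbo.resource)
 *		domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type);
 */
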
/**
 * amdgpu_bo_reserve - reserve bo
 * @bo:		bo structure
 * @no_intr:	don't return -ERESTARTSYS on pending signal
 *
 * Returns:
 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
 * a signal. Release all buffer reservations and return to user-space.
 */
static inline int amdgpu_bo_reserve(struct amdgpu_bo *bo, bool no_intr)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	int r;

	r = ttm_bo_reserve(&bo->tbo, !no_intr, false, NULL);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS)
			dev_err(adev->dev, "%p reserve failed\n", bo);
		return r;
	}
	return 0;
}

static inline void amdgpu_bo_unreserve(struct amdgpu_bo *bo)
{
	ttm_bo_unreserve(&bo->tbo);
}

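/*
 * Illustrative usage sketch (not part of the original header): callers
 * bracket BO manipulation with reserve/unreserve.  Passing no_intr == false
 * requests an interruptible wait, so -ERESTARTSYS has to be propagated back
 * to user-space.
 *
 *	r = amdgpu_bo_reserve(bo, false);
 *	if (r)
 *		return r;
 *	r = amdgpu_bo_kmap(bo, &cpu_ptr);
 *	amdgpu_bo_unreserve(bo);
 */
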
static inline unsigned long amdgpu_bo_size(struct amdgpu_bo *bo)
{
	return bo->tbo.base.size;
}

static inline unsigned amdgpu_bo_ngpu_pages(struct amdgpu_bo *bo)
{
	return bo->tbo.base.size / AMDGPU_GPU_PAGE_SIZE;
}

static inline unsigned amdgpu_bo_gpu_page_alignment(struct amdgpu_bo *bo)
{
	return (bo->tbo.page_alignment << PAGE_SHIFT) / AMDGPU_GPU_PAGE_SIZE;
}

/**
 * amdgpu_bo_mmap_offset - return mmap offset of bo
 * @bo:	amdgpu object for which we query the offset
 *
 * Returns mmap offset of the object.
 */
static inline u64 amdgpu_bo_mmap_offset(struct amdgpu_bo *bo)
{
	return drm_vma_node_offset_addr(&bo->tbo.base.vma_node);
}

/**
 * amdgpu_bo_explicit_sync - return whether the bo is explicitly synced
 */
static inline bool amdgpu_bo_explicit_sync(struct amdgpu_bo *bo)
{
	return bo->flags & AMDGPU_GEM_CREATE_EXPLICIT_SYNC;
}

/**
 * amdgpu_bo_encrypted - test if the BO is encrypted
 * @bo: pointer to a buffer object
 *
 * Return true if the buffer object is encrypted, false otherwise.
 */
static inline bool amdgpu_bo_encrypted(struct amdgpu_bo *bo)
{
	return bo->flags & AMDGPU_GEM_CREATE_ENCRYPTED;
}

/**
 * amdgpu_bo_shadowed - check if the BO is shadowed
 *
 * @bo: BO to be tested.
 *
 * Returns:
 * NULL if not shadowed or else return a BO pointer.
 */
static inline struct amdgpu_bo *amdgpu_bo_shadowed(struct amdgpu_bo *bo)
{
	if (bo->tbo.type == ttm_bo_type_kernel)
		return to_amdgpu_bo_vm(bo)->shadow;

	return NULL;
}

bool amdgpu_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain);

int amdgpu_bo_create(struct amdgpu_device *adev,
		     struct amdgpu_bo_param *bp,
		     struct amdgpu_bo **bo_ptr);
int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
			      unsigned long size, int align,
			      u32 domain, struct amdgpu_bo **bo_ptr,
			      u64 *gpu_addr, void **cpu_addr);
int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
			    unsigned long size, int align,
			    u32 domain, struct amdgpu_bo **bo_ptr,
			    u64 *gpu_addr, void **cpu_addr);
int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev,
			       uint64_t offset, uint64_t size,
			       struct amdgpu_bo **bo_ptr, void **cpu_addr);
int amdgpu_bo_create_user(struct amdgpu_device *adev,
			  struct amdgpu_bo_param *bp,
			  struct amdgpu_bo_user **ubo_ptr);
int amdgpu_bo_create_vm(struct amdgpu_device *adev,
			struct amdgpu_bo_param *bp,
			struct amdgpu_bo_vm **ubo_ptr);
void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
			   void **cpu_addr);
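/*
 * Illustrative sketch (placeholder names such as "fw_bo" are not from the
 * driver): amdgpu_bo_create_kernel() allocates, pins and CPU-maps a
 * kernel-owned BO in one call; amdgpu_bo_free_kernel() undoes all three.
 *
 *	struct amdgpu_bo *fw_bo;
 *	u64 fw_gpu_addr;
 *	void *fw_cpu_addr;
 *
 *	r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
 *				    AMDGPU_GEM_DOMAIN_VRAM, &fw_bo,
 *				    &fw_gpu_addr, &fw_cpu_addr);
 *	if (r)
 *		return r;
 *	...
 *	amdgpu_bo_free_kernel(&fw_bo, &fw_gpu_addr, &fw_cpu_addr);
 */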
int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr);
void *amdgpu_bo_kptr(struct amdgpu_bo *bo);
void amdgpu_bo_kunmap(struct amdgpu_bo *bo);
struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo);
void amdgpu_bo_unref(struct amdgpu_bo **bo);
int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain);
int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
			     u64 min_offset, u64 max_offset);
void amdgpu_bo_unpin(struct amdgpu_bo *bo);
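/*
 * Illustrative sketch (not part of the original header): pinning requires the
 * BO to be reserved, and every successful pin must be balanced by an unpin
 * once the GPU address no longer needs to stay fixed.
 *
 *	r = amdgpu_bo_reserve(bo, false);
 *	if (r)
 *		return r;
 *	r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_VRAM);
 *	if (!r)
 *		gpu_addr = amdgpu_bo_gpu_offset(bo);
 *	amdgpu_bo_unreserve(bo);
 */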
int amdgpu_bo_init(struct amdgpu_device *adev);
void amdgpu_bo_fini(struct amdgpu_device *adev);
int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags);
void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags);
int amdgpu_bo_set_metadata (struct amdgpu_bo *bo, void *metadata,
			    uint32_t metadata_size, uint64_t flags);
int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
			   size_t buffer_size, uint32_t *metadata_size,
			   uint64_t *flags);
void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
			   bool evict,
			   struct ttm_resource *new_mem);
void amdgpu_bo_release_notify(struct ttm_buffer_object *bo);
vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
		     bool shared);
int amdgpu_bo_sync_wait_resv(struct amdgpu_device *adev, struct dma_resv *resv,
			     enum amdgpu_sync_mode sync_mode, void *owner,
			     bool intr);
int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr);
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo);
u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo);
void amdgpu_bo_get_memory(struct amdgpu_bo *bo,
			  struct amdgpu_mem_stats *stats);
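/*
 * Illustrative sketch (not from the driver): amdgpu_bo_get_memory() adds one
 * BO's usage to an amdgpu_mem_stats accumulator, e.g. when building
 * fdinfo-style per-client totals.
 *
 *	struct amdgpu_mem_stats stats = {};
 *
 *	amdgpu_bo_get_memory(bo, &stats);
 */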
void amdgpu_bo_add_to_shadow_list(struct amdgpu_bo_vm *vmbo);
int amdgpu_bo_restore_shadow(struct amdgpu_bo *shadow,
			     struct dma_fence **fence);
uint32_t amdgpu_bo_get_preferred_domain(struct amdgpu_device *adev,
					uint32_t domain);

/*
 * sub allocation
 */

static inline struct amdgpu_sa_manager *
to_amdgpu_sa_manager(struct drm_suballoc_manager *manager)
{
	return container_of(manager, struct amdgpu_sa_manager, base);
}

static inline uint64_t amdgpu_sa_bo_gpu_addr(struct drm_suballoc *sa_bo)
{
	return to_amdgpu_sa_manager(sa_bo->manager)->gpu_addr +
		drm_suballoc_soffset(sa_bo);
}

static inline void *amdgpu_sa_bo_cpu_addr(struct drm_suballoc *sa_bo)
{
	return to_amdgpu_sa_manager(sa_bo->manager)->cpu_ptr +
		drm_suballoc_soffset(sa_bo);
}

int amdgpu_sa_bo_manager_init(struct amdgpu_device *adev,
			      struct amdgpu_sa_manager *sa_manager,
			      unsigned size, u32 align, u32 domain);
void amdgpu_sa_bo_manager_fini(struct amdgpu_device *adev,
			       struct amdgpu_sa_manager *sa_manager);
int amdgpu_sa_bo_manager_start(struct amdgpu_device *adev,
			       struct amdgpu_sa_manager *sa_manager);
int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
		     struct drm_suballoc **sa_bo,
		     unsigned int size);
void amdgpu_sa_bo_free(struct amdgpu_device *adev,
		       struct drm_suballoc **sa_bo,
		       struct dma_fence *fence);
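/*
 * Illustrative sketch (not part of the original header): the sub-allocator
 * hands out small ranges of a larger BO; the fence passed to
 * amdgpu_sa_bo_free() defers reuse of the range until the GPU is done with it.
 *
 *	struct drm_suballoc *sa_bo;
 *
 *	r = amdgpu_sa_bo_new(sa_manager, &sa_bo, 256);
 *	if (r)
 *		return r;
 *	gpu_addr = amdgpu_sa_bo_gpu_addr(sa_bo);
 *	...
 *	amdgpu_sa_bo_free(adev, &sa_bo, fence);
 */
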
#if defined(CONFIG_DEBUG_FS)
void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
				  struct seq_file *m);
u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m);
#endif
void amdgpu_debugfs_sa_init(struct amdgpu_device *adev);

bool amdgpu_bo_support_uswc(u64 bo_flags);

#endif