/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */
#ifndef __AMDGPU_OBJECT_H__
#define __AMDGPU_OBJECT_H__

#include <drm/amdgpu_drm.h>
#include "amdgpu.h"

#ifdef CONFIG_MMU_NOTIFIER
#include <linux/mmu_notifier.h>
#endif

#define AMDGPU_BO_INVALID_OFFSET	LONG_MAX
#define AMDGPU_BO_MAX_PLACEMENTS	3

struct amdgpu_bo_param {
	unsigned long			size;
	int				byte_align;
	u32				domain;
	u64				flags;
	enum ttm_bo_type		type;
	struct dma_resv			*resv;
};

/* bo virtual addresses in a vm */
struct amdgpu_bo_va_mapping {
	struct amdgpu_bo_va		*bo_va;
	struct list_head		list;
	struct rb_node			rb;
	uint64_t			start;
	uint64_t			last;
	uint64_t			__subtree_last;
	uint64_t			offset;
	uint64_t			flags;
};

/* User space allocated BO in a VM */
struct amdgpu_bo_va {
	struct amdgpu_vm_bo_base	base;

	/* protected by bo being reserved */
	unsigned			ref_count;

	/* all other members protected by the VM PD being reserved */
	struct dma_fence		*last_pt_update;

	/* mappings for this bo_va */
	struct list_head		invalids;
	struct list_head		valids;

	/* If the mappings are cleared or filled */
	bool				cleared;
};

struct amdgpu_bo {
	/* Protected by tbo.reserved */
	u32				preferred_domains;
	u32				allowed_domains;
	struct ttm_place		placements[AMDGPU_BO_MAX_PLACEMENTS];
	struct ttm_placement		placement;
	struct ttm_buffer_object	tbo;
	struct ttm_bo_kmap_obj		kmap;
	u64				flags;
	unsigned			prime_shared_count;
	/* per VM structure for page tables and with virtual addresses */
	struct amdgpu_vm_bo_base	*vm_bo;
	/* Constant after initialization */
	struct amdgpu_bo		*parent;
	struct amdgpu_bo		*shadow;

	struct amdgpu_mn		*mn;

#ifdef CONFIG_MMU_NOTIFIER
	struct mmu_interval_notifier	notifier;
#endif

	struct list_head		shadow_list;

	struct kgd_mem			*kfd_bo;
};

static inline struct amdgpu_bo *ttm_to_amdgpu_bo(struct ttm_buffer_object *tbo)
{
	return container_of(tbo, struct amdgpu_bo, tbo);
}

/**
 * amdgpu_mem_type_to_domain - return domain corresponding to mem_type
 * @mem_type:	ttm memory type
 *
 * Returns corresponding domain of the ttm mem_type
 */
static inline unsigned amdgpu_mem_type_to_domain(u32 mem_type)
{
	switch (mem_type) {
	case TTM_PL_VRAM:
		return AMDGPU_GEM_DOMAIN_VRAM;
	case TTM_PL_TT:
		return AMDGPU_GEM_DOMAIN_GTT;
	case TTM_PL_SYSTEM:
		return AMDGPU_GEM_DOMAIN_CPU;
	case AMDGPU_PL_GDS:
		return AMDGPU_GEM_DOMAIN_GDS;
	case AMDGPU_PL_GWS:
		return AMDGPU_GEM_DOMAIN_GWS;
	case AMDGPU_PL_OA:
		return AMDGPU_GEM_DOMAIN_OA;
	default:
		break;
	}
	return 0;
}

/**
 * amdgpu_bo_reserve - reserve bo
 * @bo:		bo structure
 * @no_intr:	don't return -ERESTARTSYS on pending signal
 *
 * Returns:
 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
 * a signal. Release all buffer reservations and return to user-space.
 */
static inline int amdgpu_bo_reserve(struct amdgpu_bo *bo, bool no_intr)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	int r;

	r = ttm_bo_reserve(&bo->tbo, !no_intr, false, NULL);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS)
			dev_err(adev->dev, "%p reserve failed\n", bo);
		return r;
	}
	return 0;
}

static inline void amdgpu_bo_unreserve(struct amdgpu_bo *bo)
{
	ttm_bo_unreserve(&bo->tbo);
}

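/*
 * Usage sketch (illustrative only, not part of this header): most BO state
 * such as placement, pinning and kmap pointers may only be touched while
 * the BO is reserved. Assuming "bo" is a valid struct amdgpu_bo pointer,
 * a typical caller looks like:
 *
 *	int r = amdgpu_bo_reserve(bo, false);
 *	if (unlikely(r))
 *		return r;
 *	... operate on the reserved BO (pin, kmap, placement, ...) ...
 *	amdgpu_bo_unreserve(bo);
 */
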
static inline unsigned long amdgpu_bo_size(struct amdgpu_bo *bo)
{
	return bo->tbo.base.size;
}

static inline unsigned amdgpu_bo_ngpu_pages(struct amdgpu_bo *bo)
{
	return bo->tbo.base.size / AMDGPU_GPU_PAGE_SIZE;
}

static inline unsigned amdgpu_bo_gpu_page_alignment(struct amdgpu_bo *bo)
{
	return (bo->tbo.mem.page_alignment << PAGE_SHIFT) / AMDGPU_GPU_PAGE_SIZE;
}

/**
 * amdgpu_bo_mmap_offset - return mmap offset of bo
 * @bo:	amdgpu object for which we query the offset
 *
 * Returns mmap offset of the object.
 */
static inline u64 amdgpu_bo_mmap_offset(struct amdgpu_bo *bo)
{
	return drm_vma_node_offset_addr(&bo->tbo.base.vma_node);
}

/**
 * amdgpu_bo_in_cpu_visible_vram - check if BO is (partly) in visible VRAM
 */
static inline bool amdgpu_bo_in_cpu_visible_vram(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	unsigned fpfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
	struct drm_mm_node *node = bo->tbo.mem.mm_node;
	unsigned long pages_left;

	if (bo->tbo.mem.mem_type != TTM_PL_VRAM)
		return false;

	for (pages_left = bo->tbo.mem.num_pages; pages_left;
	     pages_left -= node->size, node++)
		if (node->start < fpfn)
			return true;

	return false;
}

/**
 * amdgpu_bo_explicit_sync - return whether the bo is explicitly synced
 */
static inline bool amdgpu_bo_explicit_sync(struct amdgpu_bo *bo)
{
	return bo->flags & AMDGPU_GEM_CREATE_EXPLICIT_SYNC;
}

/**
 * amdgpu_bo_encrypted - test if the BO is encrypted
 * @bo: pointer to a buffer object
 *
 * Return true if the buffer object is encrypted, false otherwise.
 */
static inline bool amdgpu_bo_encrypted(struct amdgpu_bo *bo)
{
	return bo->flags & AMDGPU_GEM_CREATE_ENCRYPTED;
}

bool amdgpu_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain);

int amdgpu_bo_create(struct amdgpu_device *adev,
		     struct amdgpu_bo_param *bp,
		     struct amdgpu_bo **bo_ptr);
int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
			      unsigned long size, int align,
			      u32 domain, struct amdgpu_bo **bo_ptr,
			      u64 *gpu_addr, void **cpu_addr);
int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
			    unsigned long size, int align,
			    u32 domain, struct amdgpu_bo **bo_ptr,
			    u64 *gpu_addr, void **cpu_addr);
int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev,
			       uint64_t offset, uint64_t size, uint32_t domain,
			       struct amdgpu_bo **bo_ptr, void **cpu_addr);
void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
			   void **cpu_addr);
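/*
 * Usage sketch (illustrative only): amdgpu_bo_create_kernel() allocates,
 * pins and maps a kernel-owned BO in one call; amdgpu_bo_free_kernel()
 * undoes all of it. The variable names below are assumptions made for the
 * example, not part of this API:
 *
 *	struct amdgpu_bo *bo = NULL;
 *	u64 gpu_addr;
 *	void *cpu_ptr;
 *	int r;
 *
 *	r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
 *				    AMDGPU_GEM_DOMAIN_VRAM,
 *				    &bo, &gpu_addr, &cpu_ptr);
 *	if (r)
 *		return r;
 *	... use cpu_ptr and gpu_addr ...
 *	amdgpu_bo_free_kernel(&bo, &gpu_addr, &cpu_ptr);
 */
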
int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr);
void *amdgpu_bo_kptr(struct amdgpu_bo *bo);
void amdgpu_bo_kunmap(struct amdgpu_bo *bo);
struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo);
void amdgpu_bo_unref(struct amdgpu_bo **bo);
int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain);
int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
			     u64 min_offset, u64 max_offset);
void amdgpu_bo_unpin(struct amdgpu_bo *bo);
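/*
 * Usage sketch (illustrative only): pinning requires the BO to be reserved,
 * and every successful amdgpu_bo_pin()/amdgpu_bo_pin_restricted() call must
 * eventually be balanced by amdgpu_bo_unpin(), again under a reservation:
 *
 *	r = amdgpu_bo_reserve(bo, false);
 *	if (unlikely(r))
 *		return r;
 *	r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_VRAM);
 *	amdgpu_bo_unreserve(bo);
 */
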
int amdgpu_bo_evict_vram(struct amdgpu_device *adev);
int amdgpu_bo_init(struct amdgpu_device *adev);
void amdgpu_bo_fini(struct amdgpu_device *adev);
int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
			 struct vm_area_struct *vma);
int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags);
void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags);
int amdgpu_bo_set_metadata (struct amdgpu_bo *bo, void *metadata,
			    uint32_t metadata_size, uint64_t flags);
int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
			   size_t buffer_size, uint32_t *metadata_size,
			   uint64_t *flags);
void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
			   bool evict,
			   struct ttm_resource *new_mem);
void amdgpu_bo_release_notify(struct ttm_buffer_object *bo);
vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
		     bool shared);
int amdgpu_bo_sync_wait_resv(struct amdgpu_device *adev, struct dma_resv *resv,
			     enum amdgpu_sync_mode sync_mode, void *owner,
			     bool intr);
int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr);
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo);
u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo);
int amdgpu_bo_validate(struct amdgpu_bo *bo);
int amdgpu_bo_restore_shadow(struct amdgpu_bo *shadow,
			     struct dma_fence **fence);
uint32_t amdgpu_bo_get_preferred_pin_domain(struct amdgpu_device *adev,
					    uint32_t domain);

/*
 * sub allocation
 */

static inline uint64_t amdgpu_sa_bo_gpu_addr(struct amdgpu_sa_bo *sa_bo)
{
	return sa_bo->manager->gpu_addr + sa_bo->soffset;
}

static inline void * amdgpu_sa_bo_cpu_addr(struct amdgpu_sa_bo *sa_bo)
{
	return sa_bo->manager->cpu_ptr + sa_bo->soffset;
}

int amdgpu_sa_bo_manager_init(struct amdgpu_device *adev,
			      struct amdgpu_sa_manager *sa_manager,
			      unsigned size, u32 align, u32 domain);
void amdgpu_sa_bo_manager_fini(struct amdgpu_device *adev,
			       struct amdgpu_sa_manager *sa_manager);
int amdgpu_sa_bo_manager_start(struct amdgpu_device *adev,
			       struct amdgpu_sa_manager *sa_manager);
int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
		     struct amdgpu_sa_bo **sa_bo,
		     unsigned size, unsigned align);
void amdgpu_sa_bo_free(struct amdgpu_device *adev,
		       struct amdgpu_sa_bo **sa_bo,
		       struct dma_fence *fence);
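/*
 * Usage sketch (illustrative only): the sub-allocator hands out small ranges
 * from one backing BO. A suballocation is released against the fence that
 * protects its last use. "sa_manager" below is assumed to point to an
 * already initialized struct amdgpu_sa_manager; "fence" is the fence of the
 * submission that last touched the range:
 *
 *	struct amdgpu_sa_bo *sa_bo;
 *	int r;
 *
 *	r = amdgpu_sa_bo_new(sa_manager, &sa_bo, 256, 16);
 *	if (r)
 *		return r;
 *	... fill amdgpu_sa_bo_cpu_addr(sa_bo) and submit work ...
 *	amdgpu_sa_bo_free(adev, &sa_bo, fence);
 */
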
#if defined(CONFIG_DEBUG_FS)
void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
				  struct seq_file *m);
u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m);
#endif
int amdgpu_debugfs_sa_init(struct amdgpu_device *adev);

bool amdgpu_bo_support_uswc(u64 bo_flags);

#endif