/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Jerome Glisse
 */

#ifndef __AMDGPU_OBJECT_H__
#define __AMDGPU_OBJECT_H__

#include <drm/amdgpu_drm.h>
#include "amdgpu.h"

#define AMDGPU_BO_INVALID_OFFSET	LONG_MAX

struct amdgpu_bo_va_mapping {
	struct list_head		list;
	struct rb_node			rb;
	uint64_t			start;
	uint64_t			last;
	uint64_t			__subtree_last;
	uint64_t			offset;
	uint64_t			flags;
};

/* bo virtual addresses in a specific vm */
struct amdgpu_bo_va {
	/* protected by bo being reserved */
	struct list_head		bo_list;
	struct dma_fence		*last_pt_update;
	unsigned			ref_count;

	/* protected by vm mutex and spinlock */
	struct list_head		vm_status;

	/* mappings for this bo_va */
	struct list_head		invalids;
	struct list_head		valids;

	/* constant after initialization */
	struct amdgpu_vm		*vm;
	struct amdgpu_bo		*bo;
};

struct amdgpu_bo {
	/* Protected by tbo.reserved */
	u32				preferred_domains;
	u32				allowed_domains;
	struct ttm_place		placements[AMDGPU_GEM_DOMAIN_MAX + 1];
	struct ttm_placement		placement;
	struct ttm_buffer_object	tbo;
	struct ttm_bo_kmap_obj		kmap;
	u64				flags;
	unsigned			pin_count;
	u64				tiling_flags;
	u64				metadata_flags;
	void				*metadata;
	u32				metadata_size;
	unsigned			prime_shared_count;
	/* list of all virtual addresses to which this bo is associated */
	struct list_head		va;
	/* Constant after initialization */
	struct drm_gem_object		gem_base;
	struct amdgpu_bo		*parent;
	struct amdgpu_bo		*shadow;

	struct ttm_bo_kmap_obj		dma_buf_vmap;
	struct amdgpu_mn		*mn;
	struct list_head		mn_list;
	struct list_head		shadow_list;
};

/**
 * amdgpu_mem_type_to_domain - return domain corresponding to mem_type
 * @mem_type:	ttm memory type
 *
 * Returns corresponding domain of the ttm mem_type
 */
static inline unsigned amdgpu_mem_type_to_domain(u32 mem_type)
{
	switch (mem_type) {
	case TTM_PL_VRAM:
		return AMDGPU_GEM_DOMAIN_VRAM;
	case TTM_PL_TT:
		return AMDGPU_GEM_DOMAIN_GTT;
	case TTM_PL_SYSTEM:
		return AMDGPU_GEM_DOMAIN_CPU;
	case AMDGPU_PL_GDS:
		return AMDGPU_GEM_DOMAIN_GDS;
	case AMDGPU_PL_GWS:
		return AMDGPU_GEM_DOMAIN_GWS;
	case AMDGPU_PL_OA:
		return AMDGPU_GEM_DOMAIN_OA;
	default:
		break;
	}
	return 0;
}
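
/*
 * Usage sketch (illustrative only, not part of this header): map a BO's
 * current TTM placement back to a GEM domain, e.g. when reporting buffer
 * info to userspace. "bo" is a hypothetical struct amdgpu_bo pointer.
 *
 *	unsigned domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
 *	bool is_vram = (domain == AMDGPU_GEM_DOMAIN_VRAM);
 */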

/**
 * amdgpu_bo_reserve - reserve bo
 * @bo:		bo structure
 * @no_intr:	don't return -ERESTARTSYS on pending signal
 *
 * Returns:
 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
 * a signal. Release all buffer reservations and return to user-space.
 */
static inline int amdgpu_bo_reserve(struct amdgpu_bo *bo, bool no_intr)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	int r;

	r = ttm_bo_reserve(&bo->tbo, !no_intr, false, NULL);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS)
			dev_err(adev->dev, "%p reserve failed\n", bo);
		return r;
	}
	return 0;
}

static inline void amdgpu_bo_unreserve(struct amdgpu_bo *bo)
{
	ttm_bo_unreserve(&bo->tbo);
}
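
/*
 * Usage sketch (illustrative only, not part of this header): the usual
 * reserve/modify/unreserve pattern. With no_intr == false a pending signal
 * aborts the wait with -ERESTARTSYS, which must be forwarded to userspace.
 * "bo" is hypothetical.
 *
 *	r = amdgpu_bo_reserve(bo, false);
 *	if (unlikely(r != 0))
 *		return r;
 *	... access state protected by the reservation ...
 *	amdgpu_bo_unreserve(bo);
 */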

static inline unsigned long amdgpu_bo_size(struct amdgpu_bo *bo)
{
	return bo->tbo.num_pages << PAGE_SHIFT;
}

static inline unsigned amdgpu_bo_ngpu_pages(struct amdgpu_bo *bo)
{
	return (bo->tbo.num_pages << PAGE_SHIFT) / AMDGPU_GPU_PAGE_SIZE;
}

static inline unsigned amdgpu_bo_gpu_page_alignment(struct amdgpu_bo *bo)
{
	return (bo->tbo.mem.page_alignment << PAGE_SHIFT) / AMDGPU_GPU_PAGE_SIZE;
}

/**
 * amdgpu_bo_mmap_offset - return mmap offset of bo
 * @bo:	amdgpu object for which we query the offset
 *
 * Returns mmap offset of the object.
 */
static inline u64 amdgpu_bo_mmap_offset(struct amdgpu_bo *bo)
{
	return drm_vma_node_offset_addr(&bo->tbo.vma_node);
}
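
/*
 * Usage sketch (illustrative only, not part of this header): the returned
 * fake offset is typically handed back to userspace by a GEM mmap ioctl;
 * userspace then maps the BO through the DRM file descriptor. "args" is
 * hypothetical.
 *
 *	args->addr_ptr = amdgpu_bo_mmap_offset(bo);
 *	(userspace: mmap(NULL, size, prot, MAP_SHARED, drm_fd, addr_ptr))
 */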

/**
 * amdgpu_bo_gpu_accessible - return whether the bo is currently in memory that
 * is accessible to the GPU.
 */
static inline bool amdgpu_bo_gpu_accessible(struct amdgpu_bo *bo)
{
	switch (bo->tbo.mem.mem_type) {
	case TTM_PL_TT: return amdgpu_ttm_is_bound(bo->tbo.ttm);
	case TTM_PL_VRAM: return true;
	default: return false;
	}
}
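
/*
 * Usage sketch (illustrative only, not part of this header): bail out
 * before asking the GPU to touch a BO whose backing store is not visible
 * to it, e.g. a GTT BO that has not been bound yet.
 *
 *	if (!amdgpu_bo_gpu_accessible(bo))
 *		return -EINVAL;
 */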

int amdgpu_bo_create(struct amdgpu_device *adev,
			    unsigned long size, int byte_align,
			    bool kernel, u32 domain, u64 flags,
			    struct sg_table *sg,
			    struct reservation_object *resv,
			    struct amdgpu_bo **bo_ptr);
int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
				unsigned long size, int byte_align,
				bool kernel, u32 domain, u64 flags,
				struct sg_table *sg,
				struct ttm_placement *placement,
				struct reservation_object *resv,
				struct amdgpu_bo **bo_ptr);
int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
			    unsigned long size, int align,
			    u32 domain, struct amdgpu_bo **bo_ptr,
			    u64 *gpu_addr, void **cpu_addr);
void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
			   void **cpu_addr);
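
/*
 * Usage sketch (illustrative only, not part of this header): create a
 * small pinned, GPU- and CPU-addressable kernel BO and release it again.
 * Everything besides the amdgpu_bo_* calls is hypothetical.
 *
 *	struct amdgpu_bo *bo;
 *	u64 gpu_addr;
 *	void *cpu_addr;
 *	int r;
 *
 *	r = amdgpu_bo_create_kernel(adev, 4096, PAGE_SIZE,
 *				    AMDGPU_GEM_DOMAIN_GTT, &bo,
 *				    &gpu_addr, &cpu_addr);
 *	if (r)
 *		return r;
 *	... use cpu_addr and gpu_addr ...
 *	amdgpu_bo_free_kernel(&bo, &gpu_addr, &cpu_addr);
 */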

int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr);
void *amdgpu_bo_kptr(struct amdgpu_bo *bo);
void amdgpu_bo_kunmap(struct amdgpu_bo *bo);
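
/*
 * Usage sketch (illustrative only, not part of this header): CPU access
 * through amdgpu_bo_kmap() requires the BO to be reserved; the mapping
 * stays valid until amdgpu_bo_kunmap(). "data" and "size" are
 * hypothetical.
 *
 *	r = amdgpu_bo_reserve(bo, false);
 *	if (r)
 *		return r;
 *	r = amdgpu_bo_kmap(bo, &ptr);
 *	if (!r) {
 *		memcpy(ptr, data, size);
 *		amdgpu_bo_kunmap(bo);
 *	}
 *	amdgpu_bo_unreserve(bo);
 */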

struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo);
void amdgpu_bo_unref(struct amdgpu_bo **bo);
int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain, u64 *gpu_addr);
int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
			     u64 min_offset, u64 max_offset,
			     u64 *gpu_addr);
int amdgpu_bo_unpin(struct amdgpu_bo *bo);
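
/*
 * Usage sketch (illustrative only, not part of this header): pin a
 * reserved BO into VRAM for as long as the hardware scans it out, then
 * drop the pin again. Pins are counted; each amdgpu_bo_pin() needs a
 * matching amdgpu_bo_unpin().
 *
 *	r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_VRAM, &gpu_addr);
 *	if (r)
 *		goto err_unreserve;
 *	... program the hardware with gpu_addr ...
 *	r = amdgpu_bo_unpin(bo);
 */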

int amdgpu_bo_evict_vram(struct amdgpu_device *adev);
int amdgpu_bo_init(struct amdgpu_device *adev);
void amdgpu_bo_fini(struct amdgpu_device *adev);
int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
				struct vm_area_struct *vma);
int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags);
void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags);
int amdgpu_bo_set_metadata(struct amdgpu_bo *bo, void *metadata,
			   uint32_t metadata_size, uint64_t flags);
int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
			   size_t buffer_size, uint32_t *metadata_size,
			   uint64_t *flags);
void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
			   bool evict,
			   struct ttm_mem_reg *new_mem);
int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
		     bool shared);
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo);
int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev,
			       struct amdgpu_ring *ring,
			       struct amdgpu_bo *bo,
			       struct reservation_object *resv,
			       struct dma_fence **fence, bool direct);
int amdgpu_bo_validate(struct amdgpu_bo *bo);
int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev,
				  struct amdgpu_ring *ring,
				  struct amdgpu_bo *bo,
				  struct reservation_object *resv,
				  struct dma_fence **fence,
				  bool direct);

/*
 * sub allocation
 */

static inline uint64_t amdgpu_sa_bo_gpu_addr(struct amdgpu_sa_bo *sa_bo)
{
	return sa_bo->manager->gpu_addr + sa_bo->soffset;
}

static inline void *amdgpu_sa_bo_cpu_addr(struct amdgpu_sa_bo *sa_bo)
{
	return sa_bo->manager->cpu_ptr + sa_bo->soffset;
}

int amdgpu_sa_bo_manager_init(struct amdgpu_device *adev,
			      struct amdgpu_sa_manager *sa_manager,
			      unsigned size, u32 align, u32 domain);
void amdgpu_sa_bo_manager_fini(struct amdgpu_device *adev,
			       struct amdgpu_sa_manager *sa_manager);
int amdgpu_sa_bo_manager_start(struct amdgpu_device *adev,
			       struct amdgpu_sa_manager *sa_manager);
int amdgpu_sa_bo_manager_suspend(struct amdgpu_device *adev,
				 struct amdgpu_sa_manager *sa_manager);
int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
		     struct amdgpu_sa_bo **sa_bo,
		     unsigned size, unsigned align);
void amdgpu_sa_bo_free(struct amdgpu_device *adev,
		       struct amdgpu_sa_bo **sa_bo,
		       struct dma_fence *fence);
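
/*
 * Usage sketch (illustrative only, not part of this header): carve a
 * small allocation out of an initialized amdgpu_sa_manager and free it
 * against the fence of the last GPU job using it. "sa_manager" and
 * "fence" are hypothetical.
 *
 *	struct amdgpu_sa_bo *sa_bo;
 *
 *	r = amdgpu_sa_bo_new(sa_manager, &sa_bo, 256, 16);
 *	if (r)
 *		return r;
 *	gpu_addr = amdgpu_sa_bo_gpu_addr(sa_bo);
 *	cpu_ptr = amdgpu_sa_bo_cpu_addr(sa_bo);
 *	... build the GPU job ...
 *	amdgpu_sa_bo_free(adev, &sa_bo, fence);
 */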

#if defined(CONFIG_DEBUG_FS)
void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
				  struct seq_file *m);
#endif

#endif /* __AMDGPU_OBJECT_H__ */