/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */

#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
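
/* Userspace-supplied priorities are clamped to this maximum; the bucket
 * sort in amdgpu_bo_list_get_list() keeps one list head per priority
 * level, hence MAX_PRIORITY + 1 buckets.
 */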
#define AMDGPU_BO_LIST_MAX_PRIORITY	32u
#define AMDGPU_BO_LIST_NUM_BUCKETS	(AMDGPU_BO_LIST_MAX_PRIORITY + 1)
static int amdgpu_bo_list_set(struct amdgpu_device *adev,
			      struct drm_file *filp,
			      struct amdgpu_bo_list *list,
			      struct drm_amdgpu_bo_list_entry *info,
			      unsigned num_entries);
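
/* Final kref release callback: drop every BO reference, then free the
 * list through kfree_rcu() so that concurrent lockless lookups in
 * amdgpu_bo_list_get() never see freed memory.
 */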
static void amdgpu_bo_list_release_rcu(struct kref *ref)
{
	unsigned i;
	struct amdgpu_bo_list *list = container_of(ref, struct amdgpu_bo_list,
						   refcount);

	for (i = 0; i < list->num_entries; ++i)
		amdgpu_bo_unref(&list->array[i].robj);

	mutex_destroy(&list->lock);
	kvfree(list->array);
	kfree_rcu(list, rhead);
}
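
/* Allocate a new bo_list, populate it from the userspace-provided info
 * array and hand the result back through @list_out with an initial
 * reference held by the caller.
 */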
int amdgpu_bo_list_create(struct amdgpu_device *adev,
			  struct drm_file *filp,
			  struct drm_amdgpu_bo_list_entry *info,
			  unsigned num_entries,
			  struct amdgpu_bo_list **list_out)
{
	struct amdgpu_bo_list *list;
	int r;

	list = kzalloc(sizeof(struct amdgpu_bo_list), GFP_KERNEL);
	if (!list)
		return -ENOMEM;

	/* initialize bo list */
	mutex_init(&list->lock);
	kref_init(&list->refcount);
	r = amdgpu_bo_list_set(adev, filp, list, info, num_entries);
	if (r) {
		kfree(list);
		return r;
	}

	*list_out = list;
	return 0;
}
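
/* Remove the handle from the file's idr and drop the reference it held.
 * The list itself stays alive until the last amdgpu_bo_list_put().
 */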
static void amdgpu_bo_list_destroy(struct amdgpu_fpriv *fpriv, int id)
{
	struct amdgpu_bo_list *list;

	mutex_lock(&fpriv->bo_list_lock);
	list = idr_remove(&fpriv->bo_list_handles, id);
	mutex_unlock(&fpriv->bo_list_lock);
	if (list)
		kref_put(&list->refcount, amdgpu_bo_list_release_rcu);
}
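
/* (Re)fill @list from @info. The entry array is filled from both ends:
 * normal BOs from the front, userptr BOs from the back, so first_userptr
 * ends up marking the boundary between the two groups. GDS, GWS and OA
 * buffers are remembered separately for the CS code.
 */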
static int amdgpu_bo_list_set(struct amdgpu_device *adev,
			      struct drm_file *filp,
			      struct amdgpu_bo_list *list,
			      struct drm_amdgpu_bo_list_entry *info,
			      unsigned num_entries)
{
	struct amdgpu_bo_list_entry *array;
	struct amdgpu_bo *gds_obj = adev->gds.gds_gfx_bo;
	struct amdgpu_bo *gws_obj = adev->gds.gws_gfx_bo;
	struct amdgpu_bo *oa_obj = adev->gds.oa_gfx_bo;

	unsigned last_entry = 0, first_userptr = num_entries;
	unsigned i;
	int r;
	unsigned long total_size = 0;

	array = kvmalloc_array(num_entries, sizeof(struct amdgpu_bo_list_entry),
			       GFP_KERNEL);
	if (!array)
		return -ENOMEM;
	memset(array, 0, num_entries * sizeof(struct amdgpu_bo_list_entry));

	for (i = 0; i < num_entries; ++i) {
		struct amdgpu_bo_list_entry *entry;
		struct drm_gem_object *gobj;
		struct amdgpu_bo *bo;
		struct mm_struct *usermm;

		gobj = drm_gem_object_lookup(filp, info[i].bo_handle);
		if (!gobj) {
			r = -ENOENT;
			goto error_free;
		}

		bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
		drm_gem_object_put_unlocked(gobj);

		usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm);
		if (usermm) {
			if (usermm != current->mm) {
				amdgpu_bo_unref(&bo);
				r = -EPERM;
				goto error_free;
			}
			entry = &array[--first_userptr];
		} else {
			entry = &array[last_entry++];
		}

		entry->robj = bo;
		entry->priority = min(info[i].bo_priority,
				      AMDGPU_BO_LIST_MAX_PRIORITY);
		entry->tv.bo = &entry->robj->tbo;
		entry->tv.shared = !entry->robj->prime_shared_count;

		if (entry->robj->preferred_domains == AMDGPU_GEM_DOMAIN_GDS)
			gds_obj = entry->robj;
		if (entry->robj->preferred_domains == AMDGPU_GEM_DOMAIN_GWS)
			gws_obj = entry->robj;
		if (entry->robj->preferred_domains == AMDGPU_GEM_DOMAIN_OA)
			oa_obj = entry->robj;

		total_size += amdgpu_bo_size(entry->robj);
		trace_amdgpu_bo_list_set(list, entry->robj);
	}

	/* Release whatever the list held before and install the new array. */
	for (i = 0; i < list->num_entries; ++i)
		amdgpu_bo_unref(&list->array[i].robj);

	kvfree(list->array);

	list->gds_obj = gds_obj;
	list->gws_obj = gws_obj;
	list->oa_obj = oa_obj;
	list->first_userptr = first_userptr;
	list->array = array;
	list->num_entries = num_entries;

	trace_amdgpu_cs_bo_status(list->num_entries, total_size);
	return 0;

error_free:
	/* Unwind both ends of the partially filled array. */
	for (i = 0; i < last_entry; ++i)
		amdgpu_bo_unref(&array[i].robj);
	for (i = first_userptr; i < num_entries; ++i)
		amdgpu_bo_unref(&array[i].robj);
	kvfree(array);
	return r;
}
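
/* Look up @id locklessly under RCU; kref_get_unless_zero() guards against
 * a concurrent destroy dropping the last reference. On success the list is
 * returned locked and must be released with amdgpu_bo_list_put().
 */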
struct amdgpu_bo_list *
amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id)
{
	struct amdgpu_bo_list *result;

	rcu_read_lock();
	result = idr_find(&fpriv->bo_list_handles, id);

	if (result) {
		if (kref_get_unless_zero(&result->refcount)) {
			rcu_read_unlock();
			mutex_lock(&result->lock);
		} else {
			rcu_read_unlock();
			result = NULL;
		}
	} else {
		rcu_read_unlock();
	}

	return result;
}
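
/* Worked example (hypothetical entries e0..e3): priorities { 1, 0, 1, 0 }
 * land as bucket[0] = { e1, e3 } and bucket[1] = { e0, e2 }, stable within
 * each bucket. Splicing each bucket at the head of @validated then yields
 * e0, e2, e1, e3, i.e. higher priorities first.
 */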
void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list,
			     struct list_head *validated)
{
	/* This is based on the bucket sort with O(n) time complexity.
	 * An item with priority "i" is added to bucket[i]. The lists are then
	 * concatenated in descending order.
	 */
	struct list_head bucket[AMDGPU_BO_LIST_NUM_BUCKETS];
	unsigned i;

	for (i = 0; i < AMDGPU_BO_LIST_NUM_BUCKETS; i++)
		INIT_LIST_HEAD(&bucket[i]);

	/* Since buffers which appear sooner in the relocation list are
	 * likely to be used more often than buffers which appear later
	 * in the list, the sort mustn't change the ordering of buffers
	 * with the same priority, i.e. it must be stable.
	 */
	for (i = 0; i < list->num_entries; i++) {
		unsigned priority = list->array[i].priority;

		if (!list->array[i].robj->parent)
			list_add_tail(&list->array[i].tv.head,
				      &bucket[priority]);

		list->array[i].user_pages = NULL;
	}

	/* Connect the sorted buckets in the output list. */
	for (i = 0; i < AMDGPU_BO_LIST_NUM_BUCKETS; i++)
		list_splice(&bucket[i], validated);
}
void amdgpu_bo_list_put(struct amdgpu_bo_list *list)
{
	mutex_unlock(&list->lock);
	kref_put(&list->refcount, amdgpu_bo_list_release_rcu);
}
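
/* Synchronous counterpart to the kref/RCU release path, for callers that
 * hold the only reference (e.g. when publishing a fresh list in the idr
 * fails and nobody else can see it yet).
 */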
void amdgpu_bo_list_free(struct amdgpu_bo_list *list)
{
	unsigned i;

	for (i = 0; i < list->num_entries; ++i)
		amdgpu_bo_unref(&list->array[i].robj);

	mutex_destroy(&list->lock);
	kvfree(list->array);
	kfree(list);
}
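
/* Copy the userspace info array into a kernel buffer. in->bo_info_size
 * carries the entry stride userspace was built with, which keeps the ioctl
 * compatible in both directions: matching sizes take one bulk copy, while
 * older (smaller) or newer (larger) userspace entries are copied element
 * by element, truncated or zero-padded to the kernel's layout.
 */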
int amdgpu_bo_create_list_entry_array(struct drm_amdgpu_bo_list_in *in,
				      struct drm_amdgpu_bo_list_entry **info_param)
{
	const void __user *uptr = u64_to_user_ptr(in->bo_info_ptr);
	const uint32_t info_size = sizeof(struct drm_amdgpu_bo_list_entry);
	struct drm_amdgpu_bo_list_entry *info;
	int r;

	info = kvmalloc_array(in->bo_number, info_size, GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	/* copy the handle array from userspace to a kernel buffer */
	r = -EFAULT;
	if (likely(info_size == in->bo_info_size)) {
		unsigned long bytes = in->bo_number * in->bo_info_size;

		if (copy_from_user(info, uptr, bytes))
			goto error_free;
	} else {
		unsigned long bytes = min(in->bo_info_size, info_size);
		unsigned i;

		memset(info, 0, in->bo_number * info_size);
		for (i = 0; i < in->bo_number; ++i) {
			if (copy_from_user(&info[i], uptr, bytes))
				goto error_free;

			uptr += in->bo_info_size;
		}
	}

	*info_param = info;
	return 0;

error_free:
	kvfree(info);
	return r;
}
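
/* DRM_IOCTL_AMDGPU_BO_LIST entry point: dispatches the CREATE, DESTROY and
 * UPDATE operations and returns the (possibly new) list handle to userspace.
 */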
int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	union drm_amdgpu_bo_list *args = data;
	uint32_t handle = args->in.list_handle;
	struct drm_amdgpu_bo_list_entry *info = NULL;
	struct amdgpu_bo_list *list;
	int r;

	r = amdgpu_bo_create_list_entry_array(&args->in, &info);
	if (r)
		goto error_free;

	switch (args->in.operation) {
	case AMDGPU_BO_LIST_OP_CREATE:
		r = amdgpu_bo_list_create(adev, filp, info, args->in.bo_number,
					  &list);
		if (r)
			goto error_free;

		mutex_lock(&fpriv->bo_list_lock);
		r = idr_alloc(&fpriv->bo_list_handles, list, 1, 0, GFP_KERNEL);
		mutex_unlock(&fpriv->bo_list_lock);
		if (r < 0) {
			amdgpu_bo_list_free(list);
			goto error_free;
		}

		handle = r;
		break;

	case AMDGPU_BO_LIST_OP_DESTROY:
		amdgpu_bo_list_destroy(fpriv, handle);
		handle = 0;
		break;

	case AMDGPU_BO_LIST_OP_UPDATE:
		r = -ENOENT;
		list = amdgpu_bo_list_get(fpriv, handle);
		if (!list)
			goto error_free;

		r = amdgpu_bo_list_set(adev, filp, list, info,
				       args->in.bo_number);
		amdgpu_bo_list_put(list);
		if (r)
			goto error_free;

		break;

	default:
		r = -EINVAL;
		goto error_free;
	}

	memset(args, 0, sizeof(*args));
	args->out.list_handle = handle;
	kvfree(info);

	return 0;

error_free:
	kvfree(info);
	return r;
}
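
/* Example (userspace side; a sketch only, not part of this file). Assumes
 * an open render-node @fd and two existing GEM handles; the variable names
 * bo0/bo1 are hypothetical:
 *
 *	struct drm_amdgpu_bo_list_entry entries[2] = {
 *		{ .bo_handle = bo0, .bo_priority = 0 },
 *		{ .bo_handle = bo1, .bo_priority = 7 },
 *	};
 *	union drm_amdgpu_bo_list args = {};
 *
 *	args.in.operation = AMDGPU_BO_LIST_OP_CREATE;
 *	args.in.bo_number = 2;
 *	args.in.bo_info_size = sizeof(entries[0]);
 *	args.in.bo_info_ptr = (uintptr_t)entries;
 *	if (ioctl(fd, DRM_IOCTL_AMDGPU_BO_LIST, &args) == 0)
 *		;	/+ args.out.list_handle now names the list for CS +/
 */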