/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Christian König <[email protected]>
 */

#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"

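/*
 * Allocate a new amdgpu_bo_list, publish it in the per-file handle IDR and
 * hand it back to the caller still locked, so the entries can be filled in
 * before anyone else can look the list up through its handle.
 */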
static int amdgpu_bo_list_create(struct amdgpu_fpriv *fpriv,
                                 struct amdgpu_bo_list **result,
                                 int *id)
{
        int r;

        *result = kzalloc(sizeof(struct amdgpu_bo_list), GFP_KERNEL);
        if (!*result)
                return -ENOMEM;

        mutex_lock(&fpriv->bo_list_lock);
        r = idr_alloc(&fpriv->bo_list_handles, *result,
                      1, 0, GFP_KERNEL);
        if (r < 0) {
                mutex_unlock(&fpriv->bo_list_lock);
                kfree(*result);
                return r;
        }
        *id = r;

        mutex_init(&(*result)->lock);
        (*result)->num_entries = 0;
        (*result)->array = NULL;

        mutex_lock(&(*result)->lock);
        mutex_unlock(&fpriv->bo_list_lock);

        return 0;
}

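/*
 * Create a private copy of @list: duplicate the entry array and take an
 * additional reference on every BO so the clone stays valid independently
 * of the original. Returns NULL on allocation failure.
 */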
struct amdgpu_bo_list *
amdgpu_bo_list_clone(struct amdgpu_bo_list *list)
{
        struct amdgpu_bo_list *result;
        unsigned i;

        result = kmalloc(sizeof(struct amdgpu_bo_list), GFP_KERNEL);
        if (!result)
                return NULL;

        result->array = drm_calloc_large(list->num_entries,
                sizeof(struct amdgpu_bo_list_entry));
        if (!result->array) {
                kfree(result);
                return NULL;
        }

        mutex_init(&result->lock);
        result->gds_obj = list->gds_obj;
        result->gws_obj = list->gws_obj;
        result->oa_obj = list->oa_obj;
        result->has_userptr = list->has_userptr;
        result->num_entries = list->num_entries;

        memcpy(result->array, list->array, list->num_entries *
               sizeof(struct amdgpu_bo_list_entry));

        for (i = 0; i < result->num_entries; ++i)
                amdgpu_bo_ref(result->array[i].robj);

        return result;
}

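/*
 * Remove the list with the given handle from the per-file IDR and free it.
 * Taking and dropping the list lock under bo_list_lock makes sure a
 * concurrent user from amdgpu_bo_list_get() has put the list before it is
 * freed.
 */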
static void amdgpu_bo_list_destroy(struct amdgpu_fpriv *fpriv, int id)
{
        struct amdgpu_bo_list *list;

        mutex_lock(&fpriv->bo_list_lock);
        list = idr_find(&fpriv->bo_list_handles, id);
        if (list) {
                mutex_lock(&list->lock);
                idr_remove(&fpriv->bo_list_handles, id);
                mutex_unlock(&list->lock);
                amdgpu_bo_list_free(list);
        }
        mutex_unlock(&fpriv->bo_list_lock);
}

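/*
 * (Re)fill @list from the userspace supplied @info array: look up every GEM
 * handle, take a reference on the backing BO, set up the preferred/allowed
 * placement domains and remember special GDS/GWS/OA buffers. On success the
 * old entries are released and replaced by the new array; on failure the
 * list is left untouched.
 */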
static int amdgpu_bo_list_set(struct amdgpu_device *adev,
                                     struct drm_file *filp,
                                     struct amdgpu_bo_list *list,
                                     struct drm_amdgpu_bo_list_entry *info,
                                     unsigned num_entries)
{
        struct amdgpu_bo_list_entry *array;
        struct amdgpu_bo *gds_obj = adev->gds.gds_gfx_bo;
        struct amdgpu_bo *gws_obj = adev->gds.gws_gfx_bo;
        struct amdgpu_bo *oa_obj = adev->gds.oa_gfx_bo;

        bool has_userptr = false;
        unsigned i;

        array = drm_malloc_ab(num_entries, sizeof(struct amdgpu_bo_list_entry));
        if (!array)
                return -ENOMEM;
        memset(array, 0, num_entries * sizeof(struct amdgpu_bo_list_entry));

        for (i = 0; i < num_entries; ++i) {
                struct amdgpu_bo_list_entry *entry = &array[i];
                struct drm_gem_object *gobj;

                gobj = drm_gem_object_lookup(adev->ddev, filp, info[i].bo_handle);
                if (!gobj)
                        goto error_free;

                entry->robj = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
                drm_gem_object_unreference_unlocked(gobj);
                entry->priority = info[i].bo_priority;
                entry->prefered_domains = entry->robj->initial_domain;
                entry->allowed_domains = entry->prefered_domains;
                if (entry->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
                        entry->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;
                if (amdgpu_ttm_tt_has_userptr(entry->robj->tbo.ttm)) {
                        has_userptr = true;
                        entry->prefered_domains = AMDGPU_GEM_DOMAIN_GTT;
                        entry->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
                }
                entry->tv.bo = &entry->robj->tbo;
                entry->tv.shared = true;

                if (entry->prefered_domains == AMDGPU_GEM_DOMAIN_GDS)
                        gds_obj = entry->robj;
                if (entry->prefered_domains == AMDGPU_GEM_DOMAIN_GWS)
                        gws_obj = entry->robj;
                if (entry->prefered_domains == AMDGPU_GEM_DOMAIN_OA)
                        oa_obj = entry->robj;

                trace_amdgpu_bo_list_set(list, entry->robj);
        }

        for (i = 0; i < list->num_entries; ++i)
                amdgpu_bo_unref(&list->array[i].robj);

        drm_free_large(list->array);

        list->gds_obj = gds_obj;
        list->gws_obj = gws_obj;
        list->oa_obj = oa_obj;
        list->has_userptr = has_userptr;
        list->array = array;
        list->num_entries = num_entries;

        return 0;

error_free:
        /* drop the BO references we already took before the failed lookup */
        while (i--)
                amdgpu_bo_unref(&array[i].robj);
        drm_free_large(array);
        return -ENOENT;
}

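/*
 * Look up a list by handle and return it locked, or NULL if the handle is
 * unknown. The caller releases it again with amdgpu_bo_list_put().
 */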
struct amdgpu_bo_list *
amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id)
{
        struct amdgpu_bo_list *result;

        mutex_lock(&fpriv->bo_list_lock);
        result = idr_find(&fpriv->bo_list_handles, id);
        if (result)
                mutex_lock(&result->lock);
        mutex_unlock(&fpriv->bo_list_lock);
        return result;
}

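/* Release a list obtained from amdgpu_bo_list_get() or amdgpu_bo_list_create(). */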
void amdgpu_bo_list_put(struct amdgpu_bo_list *list)
{
        mutex_unlock(&list->lock);
}

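/* Drop all BO references held by @list and free the list itself. */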
void amdgpu_bo_list_free(struct amdgpu_bo_list *list)
{
        unsigned i;

        for (i = 0; i < list->num_entries; ++i)
                amdgpu_bo_unref(&list->array[i].robj);

        mutex_destroy(&list->lock);
        drm_free_large(list->array);
        kfree(list);
}

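/*
 * Ioctl entry point for AMDGPU_BO_LIST: copies the per-BO info array from
 * userspace (tolerating a different bo_info_size for forward/backward
 * compatibility) and then creates, updates or destroys a buffer list
 * according to the requested operation.
 */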
int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *filp)
{
        const uint32_t info_size = sizeof(struct drm_amdgpu_bo_list_entry);

        struct amdgpu_device *adev = dev->dev_private;
        struct amdgpu_fpriv *fpriv = filp->driver_priv;
        union drm_amdgpu_bo_list *args = data;
        uint32_t handle = args->in.list_handle;
        const void __user *uptr = (const void*)(long)args->in.bo_info_ptr;

        struct drm_amdgpu_bo_list_entry *info;
        struct amdgpu_bo_list *list;

        int r;

        info = drm_malloc_ab(args->in.bo_number,
                             sizeof(struct drm_amdgpu_bo_list_entry));
        if (!info)
                return -ENOMEM;

        /* copy the handle array from userspace to a kernel buffer */
        r = -EFAULT;
        if (likely(info_size == args->in.bo_info_size)) {
                unsigned long bytes = args->in.bo_number *
                        args->in.bo_info_size;

                if (copy_from_user(info, uptr, bytes))
                        goto error_free;

        } else {
                unsigned long bytes = min(args->in.bo_info_size, info_size);
                unsigned i;

                memset(info, 0, args->in.bo_number * info_size);
                for (i = 0; i < args->in.bo_number; ++i) {
                        if (copy_from_user(&info[i], uptr, bytes))
                                goto error_free;

                        uptr += args->in.bo_info_size;
                }
        }

        switch (args->in.operation) {
        case AMDGPU_BO_LIST_OP_CREATE:
                r = amdgpu_bo_list_create(fpriv, &list, &handle);
                if (r)
                        goto error_free;

                r = amdgpu_bo_list_set(adev, filp, list, info,
                                              args->in.bo_number);
                amdgpu_bo_list_put(list);
                if (r) {
                        /* don't leak the handle if filling the list failed */
                        amdgpu_bo_list_destroy(fpriv, handle);
                        goto error_free;
                }

                break;

        case AMDGPU_BO_LIST_OP_DESTROY:
                amdgpu_bo_list_destroy(fpriv, handle);
                handle = 0;
                break;

        case AMDGPU_BO_LIST_OP_UPDATE:
                r = -ENOENT;
                list = amdgpu_bo_list_get(fpriv, handle);
                if (!list)
                        goto error_free;

                r = amdgpu_bo_list_set(adev, filp, list, info,
                                              args->in.bo_number);
                amdgpu_bo_list_put(list);
                if (r)
                        goto error_free;

                break;

        default:
                r = -EINVAL;
                goto error_free;
        }

        memset(args, 0, sizeof(*args));
        args->out.list_handle = handle;
        drm_free_large(info);

        return 0;

error_free:
        drm_free_large(info);
        return r;
}