/* linux.git: drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c */
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/ktime.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>

#include <drm/amdgpu_drm.h>
#include <drm/drm_debugfs.h>

#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_dma_buf.h"
#include "amdgpu_xgmi.h"

static const struct drm_gem_object_funcs amdgpu_gem_object_funcs;

static void amdgpu_gem_object_free(struct drm_gem_object *gobj)
{
        struct amdgpu_bo *robj = gem_to_amdgpu_bo(gobj);

        if (robj) {
                amdgpu_mn_unregister(robj);
                amdgpu_bo_unref(&robj);
        }
}

int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
                             int alignment, u32 initial_domain,
                             u64 flags, enum ttm_bo_type type,
                             struct dma_resv *resv,
                             struct drm_gem_object **obj)
{
        struct amdgpu_bo *bo;
        struct amdgpu_bo_param bp;
        int r;

        memset(&bp, 0, sizeof(bp));
        *obj = NULL;

        bp.size = size;
        bp.byte_align = alignment;
        bp.type = type;
        bp.resv = resv;
        bp.preferred_domain = initial_domain;
retry:
        bp.flags = flags;
        bp.domain = initial_domain;
        r = amdgpu_bo_create(adev, &bp, &bo);
        if (r) {
                if (r != -ERESTARTSYS) {
                        if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
                                flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
                                goto retry;
                        }

                        if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
                                initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
                                goto retry;
                        }
                        DRM_DEBUG("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
                                  size, initial_domain, alignment, r);
                }
                return r;
        }
        *obj = &bo->tbo.base;
        (*obj)->funcs = &amdgpu_gem_object_funcs;

        return 0;
}
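
/*
 * Illustrative sketch only (not called in this file): an in-kernel user
 * that prefers VRAM but can live with the automatic fallbacks above
 * (drop CPU_ACCESS_REQUIRED first, then widen the domain to VRAM|GTT)
 * might allocate a buffer roughly like this:
 *
 *	struct drm_gem_object *gobj;
 *	int r;
 *
 *	r = amdgpu_gem_object_create(adev, SZ_1M, PAGE_SIZE,
 *				     AMDGPU_GEM_DOMAIN_VRAM,
 *				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
 *				     ttm_bo_type_kernel, NULL, &gobj);
 *	if (r)
 *		return r;
 *	// ... use gem_to_amdgpu_bo(gobj) ...
 *	drm_gem_object_put(gobj);
 */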

void amdgpu_gem_force_release(struct amdgpu_device *adev)
{
        struct drm_device *ddev = adev_to_drm(adev);
        struct drm_file *file;

        mutex_lock(&ddev->filelist_mutex);

        list_for_each_entry(file, &ddev->filelist, lhead) {
                struct drm_gem_object *gobj;
                int handle;

                WARN_ONCE(1, "Still active user space clients!\n");
                spin_lock(&file->table_lock);
                idr_for_each_entry(&file->object_idr, gobj, handle) {
                        WARN_ONCE(1, "And also active allocations!\n");
                        drm_gem_object_put(gobj);
                }
                idr_destroy(&file->object_idr);
                spin_unlock(&file->table_lock);
        }

        mutex_unlock(&ddev->filelist_mutex);
}

/*
 * Called from drm_gem_handle_create(), which is used by both the GEM
 * create and the open ioctl paths.
 */
static int amdgpu_gem_object_open(struct drm_gem_object *obj,
                                  struct drm_file *file_priv)
{
        struct amdgpu_bo *abo = gem_to_amdgpu_bo(obj);
        struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
        struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
        struct amdgpu_vm *vm = &fpriv->vm;
        struct amdgpu_bo_va *bo_va;
        struct mm_struct *mm;
        int r;

        mm = amdgpu_ttm_tt_get_usermm(abo->tbo.ttm);
        if (mm && mm != current->mm)
                return -EPERM;

        if (abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID &&
            abo->tbo.base.resv != vm->root.base.bo->tbo.base.resv)
                return -EPERM;

        r = amdgpu_bo_reserve(abo, false);
        if (r)
                return r;

        bo_va = amdgpu_vm_bo_find(vm, abo);
        if (!bo_va) {
                bo_va = amdgpu_vm_bo_add(adev, vm, abo);
        } else {
                ++bo_va->ref_count;
        }
        amdgpu_bo_unreserve(abo);
        return 0;
}
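
/*
 * Counterpart of amdgpu_gem_object_open() above: drops the bo_va
 * reference taken there and, on the last reference, removes the VM
 * mappings and fences the page-table clear against the BO.
 */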

static void amdgpu_gem_object_close(struct drm_gem_object *obj,
                                    struct drm_file *file_priv)
{
        struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
        struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
        struct amdgpu_vm *vm = &fpriv->vm;

        struct amdgpu_bo_list_entry vm_pd;
        struct list_head list, duplicates;
        struct dma_fence *fence = NULL;
        struct ttm_validate_buffer tv;
        struct ww_acquire_ctx ticket;
        struct amdgpu_bo_va *bo_va;
        long r;

        INIT_LIST_HEAD(&list);
        INIT_LIST_HEAD(&duplicates);

        tv.bo = &bo->tbo;
        tv.num_shared = 2;
        list_add(&tv.head, &list);

        amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);

        r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates);
        if (r) {
                dev_err(adev->dev, "leaking bo va because we fail to reserve bo (%ld)\n",
                        r);
                return;
        }
        bo_va = amdgpu_vm_bo_find(vm, bo);
        if (!bo_va || --bo_va->ref_count)
                goto out_unlock;

        amdgpu_vm_bo_rmv(adev, bo_va);
        if (!amdgpu_vm_ready(vm))
                goto out_unlock;

        fence = dma_resv_get_excl(bo->tbo.base.resv);
        if (fence) {
                amdgpu_bo_fence(bo, fence, true);
                fence = NULL;
        }

        r = amdgpu_vm_clear_freed(adev, vm, &fence);
        if (r || !fence)
                goto out_unlock;

        amdgpu_bo_fence(bo, fence, true);
        dma_fence_put(fence);

out_unlock:
        if (unlikely(r < 0))
                dev_err(adev->dev, "failed to clear page tables on GEM object close (%ld)\n",
                        r);
        ttm_eu_backoff_reservation(&ticket, &list);
}

static const struct drm_gem_object_funcs amdgpu_gem_object_funcs = {
        .free = amdgpu_gem_object_free,
        .open = amdgpu_gem_object_open,
        .close = amdgpu_gem_object_close,
        .export = amdgpu_gem_prime_export,
        .vmap = amdgpu_gem_prime_vmap,
        .vunmap = amdgpu_gem_prime_vunmap,
};

/*
 * GEM ioctls.
 */
int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *filp)
{
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct amdgpu_fpriv *fpriv = filp->driver_priv;
        struct amdgpu_vm *vm = &fpriv->vm;
        union drm_amdgpu_gem_create *args = data;
        uint64_t flags = args->in.domain_flags;
        uint64_t size = args->in.bo_size;
        struct dma_resv *resv = NULL;
        struct drm_gem_object *gobj;
        uint32_t handle;
        int r;

        /* reject invalid gem flags */
        if (flags & ~(AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
                      AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
                      AMDGPU_GEM_CREATE_CPU_GTT_USWC |
                      AMDGPU_GEM_CREATE_VRAM_CLEARED |
                      AMDGPU_GEM_CREATE_VM_ALWAYS_VALID |
                      AMDGPU_GEM_CREATE_EXPLICIT_SYNC |
                      AMDGPU_GEM_CREATE_ENCRYPTED))
                return -EINVAL;

        /* reject invalid gem domains */
        if (args->in.domains & ~AMDGPU_GEM_DOMAIN_MASK)
                return -EINVAL;

        if (!amdgpu_is_tmz(adev) && (flags & AMDGPU_GEM_CREATE_ENCRYPTED)) {
                DRM_NOTE_ONCE("Cannot allocate secure buffer since TMZ is disabled\n");
                return -EINVAL;
        }

        /* create a gem object to contain this object in */
        if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS |
            AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
                if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
                        /* if gds bo is created from user space, it must be
                         * passed to bo list
                         */
                        DRM_ERROR("GDS bo cannot be per-vm-bo\n");
                        return -EINVAL;
                }
                flags |= AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
        }

        if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
                r = amdgpu_bo_reserve(vm->root.base.bo, false);
                if (r)
                        return r;

                resv = vm->root.base.bo->tbo.base.resv;
        }

        r = amdgpu_gem_object_create(adev, size, args->in.alignment,
                                     (u32)(0xffffffff & args->in.domains),
                                     flags, ttm_bo_type_device, resv, &gobj);
        if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
                if (!r) {
                        struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);

                        abo->parent = amdgpu_bo_ref(vm->root.base.bo);
                }
                amdgpu_bo_unreserve(vm->root.base.bo);
        }
        if (r)
                return r;

        r = drm_gem_handle_create(filp, gobj, &handle);
        /* drop reference from allocate - handle holds it now */
        drm_gem_object_put(gobj);
        if (r)
                return r;

        memset(args, 0, sizeof(*args));
        args->out.handle = handle;
        return 0;
}
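
/*
 * Userspace view (hedged sketch, not part of this file): the ioctl above
 * is reached through DRM_IOCTL_AMDGPU_GEM_CREATE with the in/out union
 * from the amdgpu UAPI header, roughly:
 *
 *	union drm_amdgpu_gem_create args = {};
 *
 *	args.in.bo_size = 1 << 20;
 *	args.in.alignment = 4096;
 *	args.in.domains = AMDGPU_GEM_DOMAIN_VRAM;
 *	args.in.domain_flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
 *	if (ioctl(drm_fd, DRM_IOCTL_AMDGPU_GEM_CREATE, &args) == 0)
 *		handle = args.out.handle;
 *
 * libdrm's amdgpu_bo_alloc() wraps this same path.
 */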

int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
                             struct drm_file *filp)
{
        struct ttm_operation_ctx ctx = { true, false };
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct drm_amdgpu_gem_userptr *args = data;
        struct drm_gem_object *gobj;
        struct amdgpu_bo *bo;
        uint32_t handle;
        int r;

        args->addr = untagged_addr(args->addr);

        if (offset_in_page(args->addr | args->size))
                return -EINVAL;

        /* reject unknown flag values */
        if (args->flags & ~(AMDGPU_GEM_USERPTR_READONLY |
            AMDGPU_GEM_USERPTR_ANONONLY | AMDGPU_GEM_USERPTR_VALIDATE |
            AMDGPU_GEM_USERPTR_REGISTER))
                return -EINVAL;

        if (!(args->flags & AMDGPU_GEM_USERPTR_READONLY) &&
            !(args->flags & AMDGPU_GEM_USERPTR_REGISTER)) {
                /* if we want to write to it we must install an MMU notifier */
                return -EACCES;
        }

        /* create a gem object to contain this object in */
        r = amdgpu_gem_object_create(adev, args->size, 0, AMDGPU_GEM_DOMAIN_CPU,
                                     0, ttm_bo_type_device, NULL, &gobj);
        if (r)
                return r;

        bo = gem_to_amdgpu_bo(gobj);
        bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
        bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
        r = amdgpu_ttm_tt_set_userptr(&bo->tbo, args->addr, args->flags);
        if (r)
                goto release_object;

        if (args->flags & AMDGPU_GEM_USERPTR_REGISTER) {
                r = amdgpu_mn_register(bo, args->addr);
                if (r)
                        goto release_object;
        }

        if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) {
                r = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
                if (r)
                        goto release_object;

                r = amdgpu_bo_reserve(bo, true);
                if (r)
                        goto user_pages_done;

                amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
                r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
                amdgpu_bo_unreserve(bo);
                if (r)
                        goto user_pages_done;
        }

        r = drm_gem_handle_create(filp, gobj, &handle);
        if (r)
                goto user_pages_done;

        args->handle = handle;

user_pages_done:
        if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE)
                amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);

release_object:
        drm_gem_object_put(gobj);

        return r;
}
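
/*
 * Userspace view (hedged sketch): wrapping anonymous memory in a GEM
 * handle via DRM_IOCTL_AMDGPU_GEM_USERPTR. Both address and size must
 * be page aligned, as enforced above.
 *
 *	struct drm_amdgpu_gem_userptr args = {};
 *
 *	args.addr = (__u64)(uintptr_t)aligned_alloc(4096, len);
 *	args.size = len;
 *	args.flags = AMDGPU_GEM_USERPTR_REGISTER |
 *		     AMDGPU_GEM_USERPTR_VALIDATE;
 *	if (ioctl(drm_fd, DRM_IOCTL_AMDGPU_GEM_USERPTR, &args) == 0)
 *		handle = args.handle;
 */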

int amdgpu_mode_dumb_mmap(struct drm_file *filp,
                          struct drm_device *dev,
                          uint32_t handle, uint64_t *offset_p)
{
        struct drm_gem_object *gobj;
        struct amdgpu_bo *robj;

        gobj = drm_gem_object_lookup(filp, handle);
        if (gobj == NULL)
                return -ENOENT;
        robj = gem_to_amdgpu_bo(gobj);
        if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm) ||
            (robj->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
                drm_gem_object_put(gobj);
                return -EPERM;
        }
        *offset_p = amdgpu_bo_mmap_offset(robj);
        drm_gem_object_put(gobj);
        return 0;
}

int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *filp)
{
        union drm_amdgpu_gem_mmap *args = data;
        uint32_t handle = args->in.handle;

        memset(args, 0, sizeof(*args));
        return amdgpu_mode_dumb_mmap(filp, dev, handle, &args->out.addr_ptr);
}
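
/*
 * Userspace view (hedged sketch): DRM_IOCTL_AMDGPU_GEM_MMAP only hands
 * back a fake offset; the actual CPU mapping is a plain mmap() on the
 * DRM file descriptor using that offset.
 *
 *	union drm_amdgpu_gem_mmap args = {};
 *	void *cpu;
 *
 *	args.in.handle = handle;
 *	ioctl(drm_fd, DRM_IOCTL_AMDGPU_GEM_MMAP, &args);
 *	cpu = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   drm_fd, args.out.addr_ptr);
 */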

/**
 * amdgpu_gem_timeout - calculate jiffies timeout from absolute value
 *
 * @timeout_ns: timeout in ns
 *
 * Calculate the timeout in jiffies from an absolute timeout in ns.
 */
unsigned long amdgpu_gem_timeout(uint64_t timeout_ns)
{
        unsigned long timeout_jiffies;
        ktime_t timeout;

        /* clamp timeout if it's too large */
        if (((int64_t)timeout_ns) < 0)
                return MAX_SCHEDULE_TIMEOUT;

        timeout = ktime_sub(ns_to_ktime(timeout_ns), ktime_get());
        if (ktime_to_ns(timeout) < 0)
                return 0;

        timeout_jiffies = nsecs_to_jiffies(ktime_to_ns(timeout));
        /* clamp timeout to avoid unsigned -> signed overflow */
        if (timeout_jiffies > MAX_SCHEDULE_TIMEOUT)
                return MAX_SCHEDULE_TIMEOUT - 1;

        return timeout_jiffies;
}
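
/*
 * Worked example (assuming HZ=250, i.e. 4 ms per jiffy): a caller that
 * wants to wait 100 ms from now passes ktime_get() + 100000000 ns. The
 * ktime_sub() above leaves ~100000000 ns, which nsecs_to_jiffies()
 * turns into ~25 jiffies. A timeout_ns with the sign bit set (e.g. the
 * UAPI value AMDGPU_TIMEOUT_INFINITE, ~0ull) means "wait forever" and
 * short-circuits to MAX_SCHEDULE_TIMEOUT.
 */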

int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
                               struct drm_file *filp)
{
        union drm_amdgpu_gem_wait_idle *args = data;
        struct drm_gem_object *gobj;
        struct amdgpu_bo *robj;
        uint32_t handle = args->in.handle;
        unsigned long timeout = amdgpu_gem_timeout(args->in.timeout);
        int r = 0;
        long ret;

        gobj = drm_gem_object_lookup(filp, handle);
        if (gobj == NULL)
                return -ENOENT;
        robj = gem_to_amdgpu_bo(gobj);
        ret = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true,
                                        timeout);

        /* ret == 0 means not signaled,
         * ret > 0 means signaled
         * ret < 0 means interrupted before timeout
         */
        if (ret >= 0) {
                memset(args, 0, sizeof(*args));
                args->out.status = (ret == 0);
        } else
                r = ret;

        drm_gem_object_put(gobj);
        return r;
}
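
/*
 * Userspace view (hedged sketch): note that args->in.timeout is an
 * absolute deadline on the same clock as ktime_get() (CLOCK_MONOTONIC),
 * matching amdgpu_gem_timeout() above.
 *
 *	union drm_amdgpu_gem_wait_idle args = {};
 *	struct timespec ts;
 *
 *	clock_gettime(CLOCK_MONOTONIC, &ts);
 *	args.in.handle = handle;
 *	args.in.timeout = ts.tv_sec * 1000000000ull + ts.tv_nsec +
 *			  100 * 1000 * 1000;	// now + 100 ms
 *	ioctl(drm_fd, DRM_IOCTL_AMDGPU_GEM_WAIT_IDLE, &args);
 *	// args.out.status != 0 means the BO is still busy
 */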

int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *filp)
{
        struct drm_amdgpu_gem_metadata *args = data;
        struct drm_gem_object *gobj;
        struct amdgpu_bo *robj;
        int r = -1;

        DRM_DEBUG("%d\n", args->handle);
        gobj = drm_gem_object_lookup(filp, args->handle);
        if (gobj == NULL)
                return -ENOENT;
        robj = gem_to_amdgpu_bo(gobj);

        r = amdgpu_bo_reserve(robj, false);
        if (unlikely(r != 0))
                goto out;

        if (args->op == AMDGPU_GEM_METADATA_OP_GET_METADATA) {
                amdgpu_bo_get_tiling_flags(robj, &args->data.tiling_info);
                r = amdgpu_bo_get_metadata(robj, args->data.data,
                                           sizeof(args->data.data),
                                           &args->data.data_size_bytes,
                                           &args->data.flags);
        } else if (args->op == AMDGPU_GEM_METADATA_OP_SET_METADATA) {
                if (args->data.data_size_bytes > sizeof(args->data.data)) {
                        r = -EINVAL;
                        goto unreserve;
                }
                r = amdgpu_bo_set_tiling_flags(robj, args->data.tiling_info);
                if (!r)
                        r = amdgpu_bo_set_metadata(robj, args->data.data,
                                                   args->data.data_size_bytes,
                                                   args->data.flags);
        }

unreserve:
        amdgpu_bo_unreserve(robj);
out:
        drm_gem_object_put(gobj);
        return r;
}

/**
 * amdgpu_gem_va_update_vm - update the bo_va in its VM
 *
 * @adev: amdgpu_device pointer
 * @vm: vm to update
 * @bo_va: bo_va to update
 * @operation: map, unmap or clear
 *
 * Update the bo_va directly after setting its address. Errors are not
 * vital here, so they are not reported back to userspace.
 */
static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
                                    struct amdgpu_vm *vm,
                                    struct amdgpu_bo_va *bo_va,
                                    uint32_t operation)
{
        int r;

        if (!amdgpu_vm_ready(vm))
                return;

        r = amdgpu_vm_clear_freed(adev, vm, NULL);
        if (r)
                goto error;

        if (operation == AMDGPU_VA_OP_MAP ||
            operation == AMDGPU_VA_OP_REPLACE) {
                r = amdgpu_vm_bo_update(adev, bo_va, false);
                if (r)
                        goto error;
        }

        r = amdgpu_vm_update_pdes(adev, vm, false);

error:
        if (r && r != -ERESTARTSYS)
                DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
}

/**
 * amdgpu_gem_va_map_flags - map GEM UAPI flags into hardware flags
 *
 * @adev: amdgpu_device pointer
 * @flags: GEM UAPI flags
 *
 * Returns the hardware PTE flags corresponding to the given GEM UAPI
 * flags on this ASIC.
 */
uint64_t amdgpu_gem_va_map_flags(struct amdgpu_device *adev, uint32_t flags)
{
        uint64_t pte_flag = 0;

        if (flags & AMDGPU_VM_PAGE_EXECUTABLE)
                pte_flag |= AMDGPU_PTE_EXECUTABLE;
        if (flags & AMDGPU_VM_PAGE_READABLE)
                pte_flag |= AMDGPU_PTE_READABLE;
        if (flags & AMDGPU_VM_PAGE_WRITEABLE)
                pte_flag |= AMDGPU_PTE_WRITEABLE;
        if (flags & AMDGPU_VM_PAGE_PRT)
                pte_flag |= AMDGPU_PTE_PRT;

        if (adev->gmc.gmc_funcs->map_mtype)
                pte_flag |= amdgpu_gmc_map_mtype(adev,
                                                 flags & AMDGPU_VM_MTYPE_MASK);

        return pte_flag;
}
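
/*
 * Example (illustrative): a plain read/write mapping, i.e.
 * AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE, becomes
 * AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE, plus whatever MTYPE
 * encoding amdgpu_gmc_map_mtype() picks for this ASIC from the bits in
 * AMDGPU_VM_MTYPE_MASK (the default MTYPE if none was requested).
 */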

int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *filp)
{
        const uint32_t valid_flags = AMDGPU_VM_DELAY_UPDATE |
                AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE |
                AMDGPU_VM_PAGE_EXECUTABLE | AMDGPU_VM_MTYPE_MASK;
        const uint32_t prt_flags = AMDGPU_VM_DELAY_UPDATE |
                AMDGPU_VM_PAGE_PRT;

        struct drm_amdgpu_gem_va *args = data;
        struct drm_gem_object *gobj;
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct amdgpu_fpriv *fpriv = filp->driver_priv;
        struct amdgpu_bo *abo;
        struct amdgpu_bo_va *bo_va;
        struct amdgpu_bo_list_entry vm_pd;
        struct ttm_validate_buffer tv;
        struct ww_acquire_ctx ticket;
        struct list_head list, duplicates;
        uint64_t va_flags;
        uint64_t vm_size;
        int r = 0;

        if (args->va_address < AMDGPU_VA_RESERVED_SIZE) {
                dev_dbg(&dev->pdev->dev,
                        "va_address 0x%LX is in reserved area 0x%LX\n",
                        args->va_address, AMDGPU_VA_RESERVED_SIZE);
                return -EINVAL;
        }

        if (args->va_address >= AMDGPU_GMC_HOLE_START &&
            args->va_address < AMDGPU_GMC_HOLE_END) {
                dev_dbg(&dev->pdev->dev,
                        "va_address 0x%LX is in VA hole 0x%LX-0x%LX\n",
                        args->va_address, AMDGPU_GMC_HOLE_START,
                        AMDGPU_GMC_HOLE_END);
                return -EINVAL;
        }

        args->va_address &= AMDGPU_GMC_HOLE_MASK;

        vm_size = adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
        vm_size -= AMDGPU_VA_RESERVED_SIZE;
        if (args->va_address + args->map_size > vm_size) {
                dev_dbg(&dev->pdev->dev,
                        "va_address 0x%llx is in top reserved area 0x%llx\n",
                        args->va_address + args->map_size, vm_size);
                return -EINVAL;
        }

        if ((args->flags & ~valid_flags) && (args->flags & ~prt_flags)) {
                dev_dbg(&dev->pdev->dev, "invalid flags combination 0x%08X\n",
                        args->flags);
                return -EINVAL;
        }

        switch (args->operation) {
        case AMDGPU_VA_OP_MAP:
        case AMDGPU_VA_OP_UNMAP:
        case AMDGPU_VA_OP_CLEAR:
        case AMDGPU_VA_OP_REPLACE:
                break;
        default:
                dev_dbg(&dev->pdev->dev, "unsupported operation %d\n",
                        args->operation);
                return -EINVAL;
        }

        INIT_LIST_HEAD(&list);
        INIT_LIST_HEAD(&duplicates);
        if ((args->operation != AMDGPU_VA_OP_CLEAR) &&
            !(args->flags & AMDGPU_VM_PAGE_PRT)) {
                gobj = drm_gem_object_lookup(filp, args->handle);
                if (gobj == NULL)
                        return -ENOENT;
                abo = gem_to_amdgpu_bo(gobj);
                tv.bo = &abo->tbo;
                if (abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
                        tv.num_shared = 1;
                else
                        tv.num_shared = 0;
                list_add(&tv.head, &list);
        } else {
                gobj = NULL;
                abo = NULL;
        }

        amdgpu_vm_get_pd_bo(&fpriv->vm, &list, &vm_pd);

        r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
        if (r)
                goto error_unref;

        if (abo) {
                bo_va = amdgpu_vm_bo_find(&fpriv->vm, abo);
                if (!bo_va) {
                        r = -ENOENT;
                        goto error_backoff;
                }
        } else if (args->operation != AMDGPU_VA_OP_CLEAR) {
                bo_va = fpriv->prt_va;
        } else {
                bo_va = NULL;
        }

        switch (args->operation) {
        case AMDGPU_VA_OP_MAP:
                va_flags = amdgpu_gem_va_map_flags(adev, args->flags);
                r = amdgpu_vm_bo_map(adev, bo_va, args->va_address,
                                     args->offset_in_bo, args->map_size,
                                     va_flags);
                break;
        case AMDGPU_VA_OP_UNMAP:
                r = amdgpu_vm_bo_unmap(adev, bo_va, args->va_address);
                break;
        case AMDGPU_VA_OP_CLEAR:
                r = amdgpu_vm_bo_clear_mappings(adev, &fpriv->vm,
                                                args->va_address,
                                                args->map_size);
                break;
        case AMDGPU_VA_OP_REPLACE:
                va_flags = amdgpu_gem_va_map_flags(adev, args->flags);
                r = amdgpu_vm_bo_replace_map(adev, bo_va, args->va_address,
                                             args->offset_in_bo, args->map_size,
                                             va_flags);
                break;
        default:
                break;
        }
        if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) && !amdgpu_vm_debug)
                amdgpu_gem_va_update_vm(adev, &fpriv->vm, bo_va,
                                        args->operation);

error_backoff:
        ttm_eu_backoff_reservation(&ticket, &list);

error_unref:
        drm_gem_object_put(gobj);
        return r;
}
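
/*
 * Userspace view (hedged sketch): mapping a BO at a caller-chosen GPU
 * virtual address with DRM_IOCTL_AMDGPU_GEM_VA; libdrm's
 * amdgpu_bo_va_op() is the usual wrapper. gpu_va and bo_size here are
 * hypothetical values from the application's own VA allocator.
 *
 *	struct drm_amdgpu_gem_va args = {};
 *
 *	args.handle = handle;
 *	args.operation = AMDGPU_VA_OP_MAP;
 *	args.flags = AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE;
 *	args.va_address = gpu_va;	// above AMDGPU_VA_RESERVED_SIZE
 *	args.offset_in_bo = 0;
 *	args.map_size = bo_size;	// page aligned
 *	ioctl(drm_fd, DRM_IOCTL_AMDGPU_GEM_VA, &args);
 */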

int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *filp)
{
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct drm_amdgpu_gem_op *args = data;
        struct drm_gem_object *gobj;
        struct amdgpu_vm_bo_base *base;
        struct amdgpu_bo *robj;
        int r;

        gobj = drm_gem_object_lookup(filp, args->handle);
        if (gobj == NULL)
                return -ENOENT;
        robj = gem_to_amdgpu_bo(gobj);

        r = amdgpu_bo_reserve(robj, false);
        if (unlikely(r))
                goto out;

        switch (args->op) {
        case AMDGPU_GEM_OP_GET_GEM_CREATE_INFO: {
                struct drm_amdgpu_gem_create_in info;
                void __user *out = u64_to_user_ptr(args->value);

                info.bo_size = robj->tbo.base.size;
                info.alignment = robj->tbo.mem.page_alignment << PAGE_SHIFT;
                info.domains = robj->preferred_domains;
                info.domain_flags = robj->flags;
                amdgpu_bo_unreserve(robj);
                if (copy_to_user(out, &info, sizeof(info)))
                        r = -EFAULT;
                break;
        }
        case AMDGPU_GEM_OP_SET_PLACEMENT:
                if (robj->prime_shared_count && (args->value & AMDGPU_GEM_DOMAIN_VRAM)) {
                        r = -EINVAL;
                        amdgpu_bo_unreserve(robj);
                        break;
                }
                if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm)) {
                        r = -EPERM;
                        amdgpu_bo_unreserve(robj);
                        break;
                }
                for (base = robj->vm_bo; base; base = base->next)
                        if (amdgpu_xgmi_same_hive(amdgpu_ttm_adev(robj->tbo.bdev),
                                amdgpu_ttm_adev(base->vm->root.base.bo->tbo.bdev))) {
                                r = -EINVAL;
                                amdgpu_bo_unreserve(robj);
                                goto out;
                        }

                robj->preferred_domains = args->value & (AMDGPU_GEM_DOMAIN_VRAM |
                                                        AMDGPU_GEM_DOMAIN_GTT |
                                                        AMDGPU_GEM_DOMAIN_CPU);
                robj->allowed_domains = robj->preferred_domains;
                if (robj->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
                        robj->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;

                if (robj->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
                        amdgpu_vm_bo_invalidate(adev, robj, true);

                amdgpu_bo_unreserve(robj);
                break;
        default:
                amdgpu_bo_unreserve(robj);
                r = -EINVAL;
        }

out:
        drm_gem_object_put(gobj);
        return r;
}

int amdgpu_mode_dumb_create(struct drm_file *file_priv,
                            struct drm_device *dev,
                            struct drm_mode_create_dumb *args)
{
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct drm_gem_object *gobj;
        uint32_t handle;
        u64 flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
                    AMDGPU_GEM_CREATE_CPU_GTT_USWC;
        u32 domain;
        int r;

        /*
         * The buffer returned from this function should be cleared, but
         * it can only be done if the ring is enabled or we'll fail to
         * create the buffer.
         */
        if (adev->mman.buffer_funcs_enabled)
                flags |= AMDGPU_GEM_CREATE_VRAM_CLEARED;

        args->pitch = amdgpu_align_pitch(adev, args->width,
                                         DIV_ROUND_UP(args->bpp, 8), 0);
        args->size = (u64)args->pitch * args->height;
        args->size = ALIGN(args->size, PAGE_SIZE);
        domain = amdgpu_bo_get_preferred_pin_domain(adev,
                                amdgpu_display_supported_domains(adev, flags));
        r = amdgpu_gem_object_create(adev, args->size, 0, domain, flags,
                                     ttm_bo_type_device, NULL, &gobj);
        if (r)
                return -ENOMEM;

        r = drm_gem_handle_create(file_priv, gobj, &handle);
        /* drop reference from allocate - handle holds it now */
        drm_gem_object_put(gobj);
        if (r)
                return r;

        args->handle = handle;
        return 0;
}
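
/*
 * Worked example (illustrative): a 1920x1080 XRGB8888 dumb buffer has
 * bpp = 32, so cpp = DIV_ROUND_UP(32, 8) = 4. amdgpu_align_pitch()
 * rounds 1920 * 4 = 7680 bytes up to the ASIC's pitch alignment;
 * assuming no extra padding is needed, the size is 7680 * 1080 =
 * 8294400 bytes, which is already a whole number of 4 KiB pages.
 */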

#if defined(CONFIG_DEBUG_FS)

#define amdgpu_debugfs_gem_bo_print_flag(m, bo, flag)   \
        if (bo->flags & (AMDGPU_GEM_CREATE_ ## flag)) { \
                seq_printf((m), " " #flag);             \
        }
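
/*
 * With the helpers below, a pinned 8 MiB scanout BO allocated with CPU
 * access would show up in amdgpu_gem_info roughly as (illustrative
 * output only):
 *
 *	0x00000001:      8388608 byte VRAM pin count 1 CPU_ACCESS_REQUIRED
 */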

static int amdgpu_debugfs_gem_bo_info(int id, void *ptr, void *data)
{
        struct drm_gem_object *gobj = ptr;
        struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
        struct seq_file *m = data;

        struct dma_buf_attachment *attachment;
        struct dma_buf *dma_buf;
        unsigned domain;
        const char *placement;
        unsigned pin_count;

        domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
        switch (domain) {
        case AMDGPU_GEM_DOMAIN_VRAM:
                placement = "VRAM";
                break;
        case AMDGPU_GEM_DOMAIN_GTT:
                placement = " GTT";
                break;
        case AMDGPU_GEM_DOMAIN_CPU:
        default:
                placement = " CPU";
                break;
        }
        seq_printf(m, "\t0x%08x: %12ld byte %s",
                   id, amdgpu_bo_size(bo), placement);

        pin_count = READ_ONCE(bo->tbo.pin_count);
        if (pin_count)
                seq_printf(m, " pin count %d", pin_count);

        dma_buf = READ_ONCE(bo->tbo.base.dma_buf);
        attachment = READ_ONCE(bo->tbo.base.import_attach);

        if (attachment)
                seq_printf(m, " imported from %p%s", dma_buf,
                           attachment->peer2peer ? " P2P" : "");
        else if (dma_buf)
                seq_printf(m, " exported as %p", dma_buf);

        amdgpu_debugfs_gem_bo_print_flag(m, bo, CPU_ACCESS_REQUIRED);
        amdgpu_debugfs_gem_bo_print_flag(m, bo, NO_CPU_ACCESS);
        amdgpu_debugfs_gem_bo_print_flag(m, bo, CPU_GTT_USWC);
        amdgpu_debugfs_gem_bo_print_flag(m, bo, VRAM_CLEARED);
        amdgpu_debugfs_gem_bo_print_flag(m, bo, SHADOW);
        amdgpu_debugfs_gem_bo_print_flag(m, bo, VRAM_CONTIGUOUS);
        amdgpu_debugfs_gem_bo_print_flag(m, bo, VM_ALWAYS_VALID);
        amdgpu_debugfs_gem_bo_print_flag(m, bo, EXPLICIT_SYNC);

        seq_printf(m, "\n");

        return 0;
}

static int amdgpu_debugfs_gem_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *)m->private;
        struct drm_device *dev = node->minor->dev;
        struct drm_file *file;
        int r;

        r = mutex_lock_interruptible(&dev->filelist_mutex);
        if (r)
                return r;

        list_for_each_entry(file, &dev->filelist, lhead) {
                struct task_struct *task;

                /*
                 * Although we have a valid reference on file->pid, that does
                 * not guarantee that the task_struct who called get_pid() is
                 * still alive (e.g. get_pid(current) => fork() => exit()).
                 * Therefore, we need to protect this ->comm access using RCU.
                 */
                rcu_read_lock();
                task = pid_task(file->pid, PIDTYPE_PID);
                seq_printf(m, "pid %8d command %s:\n", pid_nr(file->pid),
                           task ? task->comm : "<unknown>");
                rcu_read_unlock();

                spin_lock(&file->table_lock);
                idr_for_each(&file->object_idr, amdgpu_debugfs_gem_bo_info, m);
                spin_unlock(&file->table_lock);
        }

        mutex_unlock(&dev->filelist_mutex);
        return 0;
}

static const struct drm_info_list amdgpu_debugfs_gem_list[] = {
        {"amdgpu_gem_info", &amdgpu_debugfs_gem_info, 0, NULL},
};
#endif

int amdgpu_debugfs_gem_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
        return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_gem_list,
                                        ARRAY_SIZE(amdgpu_debugfs_gem_list));
#endif
        return 0;
}