/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/ktime.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/pci.h>

#include <drm/amdgpu_drm.h>
#include <drm/drm_debugfs.h>

#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_xgmi.h"

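/**
 * amdgpu_gem_object_free - free the GEM object and its backing BO
 *
 * @gobj: GEM object to free
 *
 * Unregisters the MMU notifier (if any) and drops the reference on the
 * underlying amdgpu buffer object.
 */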
void amdgpu_gem_object_free(struct drm_gem_object *gobj)
{
        struct amdgpu_bo *robj = gem_to_amdgpu_bo(gobj);

        if (robj) {
                amdgpu_mn_unregister(robj);
                amdgpu_bo_unref(&robj);
        }
}

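/**
 * amdgpu_gem_object_create - create an amdgpu BO wrapped in a GEM object
 *
 * @adev: amdgpu device
 * @size: requested size in bytes
 * @alignment: requested byte alignment
 * @initial_domain: initial placement domain (VRAM, GTT, CPU, ...)
 * @flags: AMDGPU_GEM_CREATE_* allocation flags
 * @type: ttm_bo_type of the buffer
 * @resv: optional reservation object to share, or NULL
 * @obj: resulting GEM object
 *
 * On allocation failure the CPU access requirement and then the VRAM-only
 * placement are relaxed before giving up.
 *
 * Returns 0 on success, negative error code on failure.
 */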
int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
                             int alignment, u32 initial_domain,
                             u64 flags, enum ttm_bo_type type,
                             struct dma_resv *resv,
                             struct drm_gem_object **obj)
{
        struct amdgpu_bo *bo;
        struct amdgpu_bo_param bp;
        int r;

        memset(&bp, 0, sizeof(bp));
        *obj = NULL;

        bp.size = size;
        bp.byte_align = alignment;
        bp.type = type;
        bp.resv = resv;
        bp.preferred_domain = initial_domain;
retry:
        bp.flags = flags;
        bp.domain = initial_domain;
        r = amdgpu_bo_create(adev, &bp, &bo);
        if (r) {
                if (r != -ERESTARTSYS) {
                        if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
                                flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
                                goto retry;
                        }

                        if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
                                initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
                                goto retry;
                        }
                        DRM_DEBUG("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
                                  size, initial_domain, alignment, r);
                }
                return r;
        }
        *obj = &bo->tbo.base;

        return 0;
}

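/**
 * amdgpu_gem_force_release - drop all GEM handles of remaining clients
 *
 * @adev: amdgpu device
 *
 * Walks the device file list and releases every GEM object still held by
 * user space; only expected to run on teardown, hence the warnings.
 */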
void amdgpu_gem_force_release(struct amdgpu_device *adev)
{
        struct drm_device *ddev = adev->ddev;
        struct drm_file *file;

        mutex_lock(&ddev->filelist_mutex);

        list_for_each_entry(file, &ddev->filelist, lhead) {
                struct drm_gem_object *gobj;
                int handle;

                WARN_ONCE(1, "Still active user space clients!\n");
                spin_lock(&file->table_lock);
                idr_for_each_entry(&file->object_idr, gobj, handle) {
                        WARN_ONCE(1, "And also active allocations!\n");
                        drm_gem_object_put_unlocked(gobj);
                }
                idr_destroy(&file->object_idr);
                spin_unlock(&file->table_lock);
        }

        mutex_unlock(&ddev->filelist_mutex);
}

/*
 * Called from drm_gem_handle_create, which is used by both the GEM create
 * and the open ioctl paths.
 */
int amdgpu_gem_object_open(struct drm_gem_object *obj,
                           struct drm_file *file_priv)
{
        struct amdgpu_bo *abo = gem_to_amdgpu_bo(obj);
        struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
        struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
        struct amdgpu_vm *vm = &fpriv->vm;
        struct amdgpu_bo_va *bo_va;
        struct mm_struct *mm;
        int r;

        mm = amdgpu_ttm_tt_get_usermm(abo->tbo.ttm);
        if (mm && mm != current->mm)
                return -EPERM;

        if (abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID &&
            abo->tbo.base.resv != vm->root.base.bo->tbo.base.resv)
                return -EPERM;

        r = amdgpu_bo_reserve(abo, false);
        if (r)
                return r;

        bo_va = amdgpu_vm_bo_find(vm, abo);
        if (!bo_va) {
                bo_va = amdgpu_vm_bo_add(adev, vm, abo);
        } else {
                ++bo_va->ref_count;
        }
        amdgpu_bo_unreserve(abo);
        return 0;
}

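/**
 * amdgpu_gem_object_close - clean up a BO's VM mapping on handle close
 *
 * @obj: GEM object being closed
 * @file_priv: DRM file of the closing client
 *
 * Drops the bo_va reference for this VM and, when it was the last one,
 * removes the mapping and clears the freed page table entries.
 */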
void amdgpu_gem_object_close(struct drm_gem_object *obj,
                             struct drm_file *file_priv)
{
        struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
        struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
        struct amdgpu_vm *vm = &fpriv->vm;

        struct amdgpu_bo_list_entry vm_pd;
        struct list_head list, duplicates;
        struct dma_fence *fence = NULL;
        struct ttm_validate_buffer tv;
        struct ww_acquire_ctx ticket;
        struct amdgpu_bo_va *bo_va;
        long r;

        INIT_LIST_HEAD(&list);
        INIT_LIST_HEAD(&duplicates);

        tv.bo = &bo->tbo;
        tv.num_shared = 2;
        list_add(&tv.head, &list);

        amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);

        r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates);
        if (r) {
                dev_err(adev->dev, "leaking bo va because "
                        "we fail to reserve bo (%ld)\n", r);
                return;
        }
        bo_va = amdgpu_vm_bo_find(vm, bo);
        if (!bo_va || --bo_va->ref_count)
                goto out_unlock;

        amdgpu_vm_bo_rmv(adev, bo_va);
        if (!amdgpu_vm_ready(vm))
                goto out_unlock;

        fence = dma_resv_get_excl(bo->tbo.base.resv);
        if (fence) {
                amdgpu_bo_fence(bo, fence, true);
                fence = NULL;
        }

        r = amdgpu_vm_clear_freed(adev, vm, &fence);
        if (r || !fence)
                goto out_unlock;

        amdgpu_bo_fence(bo, fence, true);
        dma_fence_put(fence);

out_unlock:
        if (unlikely(r < 0))
                dev_err(adev->dev, "failed to clear page "
                        "tables on GEM object close (%ld)\n", r);
        ttm_eu_backoff_reservation(&ticket, &list);
}

/*
 * GEM ioctls.
 */
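/**
 * amdgpu_gem_create_ioctl - create a GEM object from user space
 *
 * @dev: DRM device
 * @data: union drm_amdgpu_gem_create in/out arguments
 * @filp: DRM file of the caller
 *
 * Validates the requested flags and domains, creates the buffer object and
 * returns a GEM handle to user space.
 */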
int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *filp)
{
        struct amdgpu_device *adev = dev->dev_private;
        struct amdgpu_fpriv *fpriv = filp->driver_priv;
        struct amdgpu_vm *vm = &fpriv->vm;
        union drm_amdgpu_gem_create *args = data;
        uint64_t flags = args->in.domain_flags;
        uint64_t size = args->in.bo_size;
        struct dma_resv *resv = NULL;
        struct drm_gem_object *gobj;
        uint32_t handle;
        int r;

        /* reject invalid gem flags */
        if (flags & ~(AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
                      AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
                      AMDGPU_GEM_CREATE_CPU_GTT_USWC |
                      AMDGPU_GEM_CREATE_VRAM_CLEARED |
                      AMDGPU_GEM_CREATE_VM_ALWAYS_VALID |
                      AMDGPU_GEM_CREATE_EXPLICIT_SYNC |
                      AMDGPU_GEM_CREATE_ENCRYPTED))
                return -EINVAL;

        /* reject invalid gem domains */
        if (args->in.domains & ~AMDGPU_GEM_DOMAIN_MASK)
                return -EINVAL;

        if (!amdgpu_is_tmz(adev) && (flags & AMDGPU_GEM_CREATE_ENCRYPTED)) {
                DRM_NOTE_ONCE("Cannot allocate secure buffer since TMZ is disabled\n");
                return -EINVAL;
        }

        /* create a gem object to contain this object in */
        if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS |
            AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
                if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
                        /* if gds bo is created from user space, it must be
                         * passed to bo list
                         */
                        DRM_ERROR("GDS bo cannot be per-vm-bo\n");
                        return -EINVAL;
                }
                flags |= AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
        }

        if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
                r = amdgpu_bo_reserve(vm->root.base.bo, false);
                if (r)
                        return r;

                resv = vm->root.base.bo->tbo.base.resv;
        }

        r = amdgpu_gem_object_create(adev, size, args->in.alignment,
                                     (u32)(0xffffffff & args->in.domains),
                                     flags, ttm_bo_type_device, resv, &gobj);
        if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
                if (!r) {
                        struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);

                        abo->parent = amdgpu_bo_ref(vm->root.base.bo);
                }
                amdgpu_bo_unreserve(vm->root.base.bo);
        }
        if (r)
                return r;

        r = drm_gem_handle_create(filp, gobj, &handle);
        /* drop reference from allocate - handle holds it now */
        drm_gem_object_put_unlocked(gobj);
        if (r)
                return r;

        memset(args, 0, sizeof(*args));
        args->out.handle = handle;
        return 0;
}

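/**
 * amdgpu_gem_userptr_ioctl - create a GEM object backed by user memory
 *
 * @dev: DRM device
 * @data: struct drm_amdgpu_gem_userptr in/out arguments
 * @filp: DRM file of the caller
 *
 * Wraps a user space address range in a GTT buffer object, optionally
 * registering an MMU notifier and pre-validating the user pages.
 */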
int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
                             struct drm_file *filp)
{
        struct ttm_operation_ctx ctx = { true, false };
        struct amdgpu_device *adev = dev->dev_private;
        struct drm_amdgpu_gem_userptr *args = data;
        struct drm_gem_object *gobj;
        struct amdgpu_bo *bo;
        uint32_t handle;
        int r;

        args->addr = untagged_addr(args->addr);

        if (offset_in_page(args->addr | args->size))
                return -EINVAL;

        /* reject unknown flag values */
        if (args->flags & ~(AMDGPU_GEM_USERPTR_READONLY |
            AMDGPU_GEM_USERPTR_ANONONLY | AMDGPU_GEM_USERPTR_VALIDATE |
            AMDGPU_GEM_USERPTR_REGISTER))
                return -EINVAL;

        if (!(args->flags & AMDGPU_GEM_USERPTR_READONLY) &&
             !(args->flags & AMDGPU_GEM_USERPTR_REGISTER)) {

                /* if we want to write to it we must install a MMU notifier */
                return -EACCES;
        }

        /* create a gem object to contain this object in */
        r = amdgpu_gem_object_create(adev, args->size, 0, AMDGPU_GEM_DOMAIN_CPU,
                                     0, ttm_bo_type_device, NULL, &gobj);
        if (r)
                return r;

        bo = gem_to_amdgpu_bo(gobj);
        bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
        bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
        r = amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags);
        if (r)
                goto release_object;

        if (args->flags & AMDGPU_GEM_USERPTR_REGISTER) {
                r = amdgpu_mn_register(bo, args->addr);
                if (r)
                        goto release_object;
        }

        if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) {
                r = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
                if (r)
                        goto release_object;

                r = amdgpu_bo_reserve(bo, true);
                if (r)
                        goto user_pages_done;

                amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
                r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
                amdgpu_bo_unreserve(bo);
                if (r)
                        goto user_pages_done;
        }

        r = drm_gem_handle_create(filp, gobj, &handle);
        if (r)
                goto user_pages_done;

        args->handle = handle;

user_pages_done:
        if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE)
                amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);

release_object:
        drm_gem_object_put_unlocked(gobj);

        return r;
}

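/**
 * amdgpu_mode_dumb_mmap - look up the fake mmap offset of a buffer
 *
 * @filp: DRM file of the caller
 * @dev: DRM device
 * @handle: GEM handle of the buffer
 * @offset_p: returned mmap offset
 *
 * Userptr BOs and BOs created without CPU access cannot be mapped.
 */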
int amdgpu_mode_dumb_mmap(struct drm_file *filp,
                          struct drm_device *dev,
                          uint32_t handle, uint64_t *offset_p)
{
        struct drm_gem_object *gobj;
        struct amdgpu_bo *robj;

        gobj = drm_gem_object_lookup(filp, handle);
        if (gobj == NULL) {
                return -ENOENT;
        }
        robj = gem_to_amdgpu_bo(gobj);
        if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm) ||
            (robj->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
                drm_gem_object_put_unlocked(gobj);
                return -EPERM;
        }
        *offset_p = amdgpu_bo_mmap_offset(robj);
        drm_gem_object_put_unlocked(gobj);
        return 0;
}

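/**
 * amdgpu_gem_mmap_ioctl - query the mmap offset of a GEM object
 *
 * @dev: DRM device
 * @data: union drm_amdgpu_gem_mmap in/out arguments
 * @filp: DRM file of the caller
 */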
int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *filp)
{
        union drm_amdgpu_gem_mmap *args = data;
        uint32_t handle = args->in.handle;

        memset(args, 0, sizeof(*args));
        return amdgpu_mode_dumb_mmap(filp, dev, handle, &args->out.addr_ptr);
}

/**
 * amdgpu_gem_timeout - calculate jiffies timeout from absolute value
 *
 * @timeout_ns: timeout in ns
 *
 * Calculate the timeout in jiffies from an absolute timeout in ns.
 */
unsigned long amdgpu_gem_timeout(uint64_t timeout_ns)
{
        unsigned long timeout_jiffies;
        ktime_t timeout;

        /* clamp timeout if it's too large */
        if (((int64_t)timeout_ns) < 0)
                return MAX_SCHEDULE_TIMEOUT;

        timeout = ktime_sub(ns_to_ktime(timeout_ns), ktime_get());
        if (ktime_to_ns(timeout) < 0)
                return 0;

        timeout_jiffies = nsecs_to_jiffies(ktime_to_ns(timeout));
        /* clamp timeout to avoid unsigned -> signed overflow */
        if (timeout_jiffies > MAX_SCHEDULE_TIMEOUT)
                return MAX_SCHEDULE_TIMEOUT - 1;

        return timeout_jiffies;
}

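/**
 * amdgpu_gem_wait_idle_ioctl - wait for a GEM object to become idle
 *
 * @dev: DRM device
 * @data: union drm_amdgpu_gem_wait_idle in/out arguments
 * @filp: DRM file of the caller
 *
 * Waits on all fences of the buffer's reservation object up to the
 * requested timeout and reports back whether it is still busy.
 */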
int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
                               struct drm_file *filp)
{
        union drm_amdgpu_gem_wait_idle *args = data;
        struct drm_gem_object *gobj;
        struct amdgpu_bo *robj;
        uint32_t handle = args->in.handle;
        unsigned long timeout = amdgpu_gem_timeout(args->in.timeout);
        int r = 0;
        long ret;

        gobj = drm_gem_object_lookup(filp, handle);
        if (gobj == NULL) {
                return -ENOENT;
        }
        robj = gem_to_amdgpu_bo(gobj);
        ret = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true,
                                        timeout);

        /* ret == 0 means not signaled,
         * ret > 0 means signaled
         * ret < 0 means interrupted before timeout
         */
        if (ret >= 0) {
                memset(args, 0, sizeof(*args));
                args->out.status = (ret == 0);
        } else
                r = ret;

        drm_gem_object_put_unlocked(gobj);
        return r;
}

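/**
 * amdgpu_gem_metadata_ioctl - get or set buffer metadata and tiling flags
 *
 * @dev: DRM device
 * @data: struct drm_amdgpu_gem_metadata in/out arguments
 * @filp: DRM file of the caller
 */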
int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *filp)
{
        struct drm_amdgpu_gem_metadata *args = data;
        struct drm_gem_object *gobj;
        struct amdgpu_bo *robj;
        int r = -1;

        DRM_DEBUG("%d \n", args->handle);
        gobj = drm_gem_object_lookup(filp, args->handle);
        if (gobj == NULL)
                return -ENOENT;
        robj = gem_to_amdgpu_bo(gobj);

        r = amdgpu_bo_reserve(robj, false);
        if (unlikely(r != 0))
                goto out;

        if (args->op == AMDGPU_GEM_METADATA_OP_GET_METADATA) {
                amdgpu_bo_get_tiling_flags(robj, &args->data.tiling_info);
                r = amdgpu_bo_get_metadata(robj, args->data.data,
                                           sizeof(args->data.data),
                                           &args->data.data_size_bytes,
                                           &args->data.flags);
        } else if (args->op == AMDGPU_GEM_METADATA_OP_SET_METADATA) {
                if (args->data.data_size_bytes > sizeof(args->data.data)) {
                        r = -EINVAL;
                        goto unreserve;
                }
                r = amdgpu_bo_set_tiling_flags(robj, args->data.tiling_info);
                if (!r)
                        r = amdgpu_bo_set_metadata(robj, args->data.data,
                                                   args->data.data_size_bytes,
                                                   args->data.flags);
        }

unreserve:
        amdgpu_bo_unreserve(robj);
out:
        drm_gem_object_put_unlocked(gobj);
        return r;
}

/**
 * amdgpu_gem_va_update_vm - update the bo_va in its VM
 *
 * @adev: amdgpu_device pointer
 * @vm: vm to update
 * @bo_va: bo_va to update
 * @operation: map, unmap or clear
 *
 * Update the bo_va directly after setting its address. Errors are not
 * vital here, so they are not reported back to userspace.
 */
static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
                                    struct amdgpu_vm *vm,
                                    struct amdgpu_bo_va *bo_va,
                                    uint32_t operation)
{
        int r;

        if (!amdgpu_vm_ready(vm))
                return;

        r = amdgpu_vm_clear_freed(adev, vm, NULL);
        if (r)
                goto error;

        if (operation == AMDGPU_VA_OP_MAP ||
            operation == AMDGPU_VA_OP_REPLACE) {
                r = amdgpu_vm_bo_update(adev, bo_va, false);
                if (r)
                        goto error;
        }

        r = amdgpu_vm_update_pdes(adev, vm, false);

error:
        if (r && r != -ERESTARTSYS)
                DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
}

/**
 * amdgpu_gem_va_map_flags - map GEM UAPI flags into hardware flags
 *
 * @adev: amdgpu_device pointer
 * @flags: GEM UAPI flags
 *
 * Returns the GEM UAPI flags mapped into hardware for the ASIC.
 */
uint64_t amdgpu_gem_va_map_flags(struct amdgpu_device *adev, uint32_t flags)
{
        uint64_t pte_flag = 0;

        if (flags & AMDGPU_VM_PAGE_EXECUTABLE)
                pte_flag |= AMDGPU_PTE_EXECUTABLE;
        if (flags & AMDGPU_VM_PAGE_READABLE)
                pte_flag |= AMDGPU_PTE_READABLE;
        if (flags & AMDGPU_VM_PAGE_WRITEABLE)
                pte_flag |= AMDGPU_PTE_WRITEABLE;
        if (flags & AMDGPU_VM_PAGE_PRT)
                pte_flag |= AMDGPU_PTE_PRT;

        if (adev->gmc.gmc_funcs->map_mtype)
                pte_flag |= amdgpu_gmc_map_mtype(adev,
                                                 flags & AMDGPU_VM_MTYPE_MASK);

        return pte_flag;
}

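/**
 * amdgpu_gem_va_ioctl - map, unmap or replace a BO mapping in the GPU VM
 *
 * @dev: DRM device
 * @data: struct drm_amdgpu_gem_va in/out arguments
 * @filp: DRM file of the caller
 *
 * Validates the requested VA range and flags, performs the requested MAP,
 * UNMAP, CLEAR or REPLACE operation and optionally updates the page tables
 * right away.
 */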
int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *filp)
{
        const uint32_t valid_flags = AMDGPU_VM_DELAY_UPDATE |
                AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE |
                AMDGPU_VM_PAGE_EXECUTABLE | AMDGPU_VM_MTYPE_MASK;
        const uint32_t prt_flags = AMDGPU_VM_DELAY_UPDATE |
                AMDGPU_VM_PAGE_PRT;

        struct drm_amdgpu_gem_va *args = data;
        struct drm_gem_object *gobj;
        struct amdgpu_device *adev = dev->dev_private;
        struct amdgpu_fpriv *fpriv = filp->driver_priv;
        struct amdgpu_bo *abo;
        struct amdgpu_bo_va *bo_va;
        struct amdgpu_bo_list_entry vm_pd;
        struct ttm_validate_buffer tv;
        struct ww_acquire_ctx ticket;
        struct list_head list, duplicates;
        uint64_t va_flags;
        int r = 0;

        if (args->va_address < AMDGPU_VA_RESERVED_SIZE) {
                dev_dbg(&dev->pdev->dev,
                        "va_address 0x%LX is in reserved area 0x%LX\n",
                        args->va_address, AMDGPU_VA_RESERVED_SIZE);
                return -EINVAL;
        }

        if (args->va_address >= AMDGPU_GMC_HOLE_START &&
            args->va_address < AMDGPU_GMC_HOLE_END) {
                dev_dbg(&dev->pdev->dev,
                        "va_address 0x%LX is in VA hole 0x%LX-0x%LX\n",
                        args->va_address, AMDGPU_GMC_HOLE_START,
                        AMDGPU_GMC_HOLE_END);
                return -EINVAL;
        }

        args->va_address &= AMDGPU_GMC_HOLE_MASK;

        if ((args->flags & ~valid_flags) && (args->flags & ~prt_flags)) {
                dev_dbg(&dev->pdev->dev, "invalid flags combination 0x%08X\n",
                        args->flags);
                return -EINVAL;
        }

        switch (args->operation) {
        case AMDGPU_VA_OP_MAP:
        case AMDGPU_VA_OP_UNMAP:
        case AMDGPU_VA_OP_CLEAR:
        case AMDGPU_VA_OP_REPLACE:
                break;
        default:
                dev_dbg(&dev->pdev->dev, "unsupported operation %d\n",
                        args->operation);
                return -EINVAL;
        }

        INIT_LIST_HEAD(&list);
        INIT_LIST_HEAD(&duplicates);
        if ((args->operation != AMDGPU_VA_OP_CLEAR) &&
            !(args->flags & AMDGPU_VM_PAGE_PRT)) {
                gobj = drm_gem_object_lookup(filp, args->handle);
                if (gobj == NULL)
                        return -ENOENT;
                abo = gem_to_amdgpu_bo(gobj);
                tv.bo = &abo->tbo;
                if (abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
                        tv.num_shared = 1;
                else
                        tv.num_shared = 0;
                list_add(&tv.head, &list);
        } else {
                gobj = NULL;
                abo = NULL;
        }

        amdgpu_vm_get_pd_bo(&fpriv->vm, &list, &vm_pd);

        r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
        if (r)
                goto error_unref;

        if (abo) {
                bo_va = amdgpu_vm_bo_find(&fpriv->vm, abo);
                if (!bo_va) {
                        r = -ENOENT;
                        goto error_backoff;
                }
        } else if (args->operation != AMDGPU_VA_OP_CLEAR) {
                bo_va = fpriv->prt_va;
        } else {
                bo_va = NULL;
        }

        switch (args->operation) {
        case AMDGPU_VA_OP_MAP:
                va_flags = amdgpu_gem_va_map_flags(adev, args->flags);
                r = amdgpu_vm_bo_map(adev, bo_va, args->va_address,
                                     args->offset_in_bo, args->map_size,
                                     va_flags);
                break;
        case AMDGPU_VA_OP_UNMAP:
                r = amdgpu_vm_bo_unmap(adev, bo_va, args->va_address);
                break;

        case AMDGPU_VA_OP_CLEAR:
                r = amdgpu_vm_bo_clear_mappings(adev, &fpriv->vm,
                                                args->va_address,
                                                args->map_size);
                break;
        case AMDGPU_VA_OP_REPLACE:
                va_flags = amdgpu_gem_va_map_flags(adev, args->flags);
                r = amdgpu_vm_bo_replace_map(adev, bo_va, args->va_address,
                                             args->offset_in_bo, args->map_size,
                                             va_flags);
                break;
        default:
                break;
        }
        if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) && !amdgpu_vm_debug)
                amdgpu_gem_va_update_vm(adev, &fpriv->vm, bo_va,
                                        args->operation);

error_backoff:
        ttm_eu_backoff_reservation(&ticket, &list);

error_unref:
        drm_gem_object_put_unlocked(gobj);
        return r;
}

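/**
 * amdgpu_gem_op_ioctl - query creation info or change placement of a BO
 *
 * @dev: DRM device
 * @data: struct drm_amdgpu_gem_op in/out arguments
 * @filp: DRM file of the caller
 */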
int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *filp)
{
        struct amdgpu_device *adev = dev->dev_private;
        struct drm_amdgpu_gem_op *args = data;
        struct drm_gem_object *gobj;
        struct amdgpu_vm_bo_base *base;
        struct amdgpu_bo *robj;
        int r;

        gobj = drm_gem_object_lookup(filp, args->handle);
        if (gobj == NULL) {
                return -ENOENT;
        }
        robj = gem_to_amdgpu_bo(gobj);

        r = amdgpu_bo_reserve(robj, false);
        if (unlikely(r))
                goto out;

        switch (args->op) {
        case AMDGPU_GEM_OP_GET_GEM_CREATE_INFO: {
                struct drm_amdgpu_gem_create_in info;
                void __user *out = u64_to_user_ptr(args->value);

                info.bo_size = robj->tbo.base.size;
                info.alignment = robj->tbo.mem.page_alignment << PAGE_SHIFT;
                info.domains = robj->preferred_domains;
                info.domain_flags = robj->flags;
                amdgpu_bo_unreserve(robj);
                if (copy_to_user(out, &info, sizeof(info)))
                        r = -EFAULT;
                break;
        }
        case AMDGPU_GEM_OP_SET_PLACEMENT:
                if (robj->prime_shared_count && (args->value & AMDGPU_GEM_DOMAIN_VRAM)) {
                        r = -EINVAL;
                        amdgpu_bo_unreserve(robj);
                        break;
                }
                if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm)) {
                        r = -EPERM;
                        amdgpu_bo_unreserve(robj);
                        break;
                }
                for (base = robj->vm_bo; base; base = base->next)
                        if (amdgpu_xgmi_same_hive(amdgpu_ttm_adev(robj->tbo.bdev),
                                amdgpu_ttm_adev(base->vm->root.base.bo->tbo.bdev))) {
                                r = -EINVAL;
                                amdgpu_bo_unreserve(robj);
                                goto out;
                        }

                robj->preferred_domains = args->value & (AMDGPU_GEM_DOMAIN_VRAM |
                                                        AMDGPU_GEM_DOMAIN_GTT |
                                                        AMDGPU_GEM_DOMAIN_CPU);
                robj->allowed_domains = robj->preferred_domains;
                if (robj->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
                        robj->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;

                if (robj->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
                        amdgpu_vm_bo_invalidate(adev, robj, true);

                amdgpu_bo_unreserve(robj);
                break;
        default:
                amdgpu_bo_unreserve(robj);
                r = -EINVAL;
        }

out:
        drm_gem_object_put_unlocked(gobj);
        return r;
}

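/**
 * amdgpu_mode_dumb_create - create a dumb buffer suitable for scanout
 *
 * @file_priv: DRM file of the caller
 * @dev: DRM device
 * @args: struct drm_mode_create_dumb in/out arguments
 *
 * Allocates a CPU accessible, optionally cleared buffer in a domain
 * supported by the display hardware and returns a GEM handle for it.
 */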
int amdgpu_mode_dumb_create(struct drm_file *file_priv,
                            struct drm_device *dev,
                            struct drm_mode_create_dumb *args)
{
        struct amdgpu_device *adev = dev->dev_private;
        struct drm_gem_object *gobj;
        uint32_t handle;
        u64 flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
                    AMDGPU_GEM_CREATE_CPU_GTT_USWC;
        u32 domain;
        int r;

        /*
         * The buffer returned from this function should be cleared, but
         * it can only be done if the ring is enabled or we'll fail to
         * create the buffer.
         */
        if (adev->mman.buffer_funcs_enabled)
                flags |= AMDGPU_GEM_CREATE_VRAM_CLEARED;

        args->pitch = amdgpu_align_pitch(adev, args->width,
                                         DIV_ROUND_UP(args->bpp, 8), 0);
        args->size = (u64)args->pitch * args->height;
        args->size = ALIGN(args->size, PAGE_SIZE);
        domain = amdgpu_bo_get_preferred_pin_domain(adev,
                                amdgpu_display_supported_domains(adev, flags));
        r = amdgpu_gem_object_create(adev, args->size, 0, domain, flags,
                                     ttm_bo_type_device, NULL, &gobj);
        if (r)
                return -ENOMEM;

        r = drm_gem_handle_create(file_priv, gobj, &handle);
        /* drop reference from allocate - handle holds it now */
        drm_gem_object_put_unlocked(gobj);
        if (r) {
                return r;
        }
        args->handle = handle;
        return 0;
}

#if defined(CONFIG_DEBUG_FS)

#define amdgpu_debugfs_gem_bo_print_flag(m, bo, flag)   \
        if (bo->flags & (AMDGPU_GEM_CREATE_ ## flag)) { \
                seq_printf((m), " " #flag);             \
        }

static int amdgpu_debugfs_gem_bo_info(int id, void *ptr, void *data)
{
        struct drm_gem_object *gobj = ptr;
        struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
        struct seq_file *m = data;

        struct dma_buf_attachment *attachment;
        struct dma_buf *dma_buf;
        unsigned domain;
        const char *placement;
        unsigned pin_count;

        domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
        switch (domain) {
        case AMDGPU_GEM_DOMAIN_VRAM:
                placement = "VRAM";
                break;
        case AMDGPU_GEM_DOMAIN_GTT:
                placement = " GTT";
                break;
        case AMDGPU_GEM_DOMAIN_CPU:
        default:
                placement = " CPU";
                break;
        }
        seq_printf(m, "\t0x%08x: %12ld byte %s",
                   id, amdgpu_bo_size(bo), placement);

        pin_count = READ_ONCE(bo->pin_count);
        if (pin_count)
                seq_printf(m, " pin count %d", pin_count);

        dma_buf = READ_ONCE(bo->tbo.base.dma_buf);
        attachment = READ_ONCE(bo->tbo.base.import_attach);

        if (attachment)
                seq_printf(m, " imported from %p", dma_buf);
        else if (dma_buf)
                seq_printf(m, " exported as %p", dma_buf);

        amdgpu_debugfs_gem_bo_print_flag(m, bo, CPU_ACCESS_REQUIRED);
        amdgpu_debugfs_gem_bo_print_flag(m, bo, NO_CPU_ACCESS);
        amdgpu_debugfs_gem_bo_print_flag(m, bo, CPU_GTT_USWC);
        amdgpu_debugfs_gem_bo_print_flag(m, bo, VRAM_CLEARED);
        amdgpu_debugfs_gem_bo_print_flag(m, bo, SHADOW);
        amdgpu_debugfs_gem_bo_print_flag(m, bo, VRAM_CONTIGUOUS);
        amdgpu_debugfs_gem_bo_print_flag(m, bo, VM_ALWAYS_VALID);
        amdgpu_debugfs_gem_bo_print_flag(m, bo, EXPLICIT_SYNC);

        seq_printf(m, "\n");

        return 0;
}

static int amdgpu_debugfs_gem_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *)m->private;
        struct drm_device *dev = node->minor->dev;
        struct drm_file *file;
        int r;

        r = mutex_lock_interruptible(&dev->filelist_mutex);
        if (r)
                return r;

        list_for_each_entry(file, &dev->filelist, lhead) {
                struct task_struct *task;

                /*
                 * Although we have a valid reference on file->pid, that does
                 * not guarantee that the task_struct who called get_pid() is
                 * still alive (e.g. get_pid(current) => fork() => exit()).
                 * Therefore, we need to protect this ->comm access using RCU.
                 */
                rcu_read_lock();
                task = pid_task(file->pid, PIDTYPE_PID);
                seq_printf(m, "pid %8d command %s:\n", pid_nr(file->pid),
                           task ? task->comm : "<unknown>");
                rcu_read_unlock();

                spin_lock(&file->table_lock);
                idr_for_each(&file->object_idr, amdgpu_debugfs_gem_bo_info, m);
                spin_unlock(&file->table_lock);
        }

        mutex_unlock(&dev->filelist_mutex);
        return 0;
}

static const struct drm_info_list amdgpu_debugfs_gem_list[] = {
        {"amdgpu_gem_info", &amdgpu_debugfs_gem_info, 0, NULL},
};
#endif

int amdgpu_debugfs_gem_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
        return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_gem_list, 1);
#endif
        return 0;
}