/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/ktime.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"

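/**
 * amdgpu_gem_object_free - free the amdgpu_bo backing a GEM object
 *
 * @gobj: GEM object to free
 *
 * Tears down a dma-buf import if one is attached, unregisters the MMU
 * notifier and drops the BO reference.
 */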
void amdgpu_gem_object_free(struct drm_gem_object *gobj)
{
        struct amdgpu_bo *robj = gem_to_amdgpu_bo(gobj);

        if (robj) {
                if (robj->gem_base.import_attach)
                        drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
                amdgpu_mn_unregister(robj);
                amdgpu_bo_unref(&robj);
        }
}

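/**
 * amdgpu_gem_object_create - allocate a BO and wrap it in a GEM object
 *
 * @adev: amdgpu device
 * @size: requested size in bytes
 * @alignment: requested alignment, raised to at least PAGE_SIZE
 * @initial_domain: initial placement domain(s)
 * @flags: AMDGPU_GEM_CREATE_* flags
 * @kernel: true for kernel internal allocations
 * @obj: filled with the new GEM object on success
 *
 * VRAM allocations that fail for reasons other than a pending signal
 * are retried with GTT added as an additional domain.
 */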
int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
                                int alignment, u32 initial_domain,
                                u64 flags, bool kernel,
                                struct drm_gem_object **obj)
{
        struct amdgpu_bo *robj;
        unsigned long max_size;
        int r;

        *obj = NULL;
        /* At least align on page size */
        if (alignment < PAGE_SIZE) {
                alignment = PAGE_SIZE;
        }

        if (!(initial_domain & (AMDGPU_GEM_DOMAIN_GDS | AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA))) {
                /* Maximum bo size is the unpinned gtt size since we use the gtt to
                 * handle vram to system pool migrations.
                 */
                max_size = adev->mc.gtt_size - adev->gart_pin_size;
                if (size > max_size) {
                        DRM_DEBUG("Allocation size %ldMB bigger than %ldMB limit\n",
                                  size >> 20, max_size >> 20);
                        return -ENOMEM;
                }
        }
retry:
        r = amdgpu_bo_create(adev, size, alignment, kernel, initial_domain,
                             flags, NULL, NULL, &robj);
        if (r) {
                if (r != -ERESTARTSYS) {
                        if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
                                initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
                                goto retry;
                        }
                        DRM_ERROR("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
                                  size, initial_domain, alignment, r);
                }
                return r;
        }
        *obj = &robj->gem_base;

        return 0;
}

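/**
 * amdgpu_gem_force_release - forcibly drop all GEM object handles
 *
 * @adev: amdgpu device
 *
 * Walks all open DRM files and releases their remaining GEM objects,
 * warning about any user space clients that are still active.
 */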
void amdgpu_gem_force_release(struct amdgpu_device *adev)
{
        struct drm_device *ddev = adev->ddev;
        struct drm_file *file;

        mutex_lock(&ddev->struct_mutex);

        list_for_each_entry(file, &ddev->filelist, lhead) {
                struct drm_gem_object *gobj;
                int handle;

                WARN_ONCE(1, "Still active user space clients!\n");
                spin_lock(&file->table_lock);
                idr_for_each_entry(&file->object_idr, gobj, handle) {
                        WARN_ONCE(1, "And also active allocations!\n");
                        drm_gem_object_unreference(gobj);
                }
                idr_destroy(&file->object_idr);
                spin_unlock(&file->table_lock);
        }

        mutex_unlock(&ddev->struct_mutex);
}

/*
 * Called from drm_gem_handle_create(), which appears in both the GEM
 * create and the open ioctl paths.
 */
int amdgpu_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
        struct amdgpu_bo *rbo = gem_to_amdgpu_bo(obj);
        struct amdgpu_device *adev = rbo->adev;
        struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
        struct amdgpu_vm *vm = &fpriv->vm;
        struct amdgpu_bo_va *bo_va;
        int r;

        r = amdgpu_bo_reserve(rbo, false);
        if (r)
                return r;

        bo_va = amdgpu_vm_bo_find(vm, rbo);
        if (!bo_va) {
                bo_va = amdgpu_vm_bo_add(adev, vm, rbo);
        } else {
                ++bo_va->ref_count;
        }
        amdgpu_bo_unreserve(rbo);
        return 0;
}

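/**
 * amdgpu_gem_object_close - handle closing of a GEM handle
 *
 * @obj: GEM object being closed
 * @file_priv: DRM file private of the closing client
 *
 * Drops the client's bo_va reference in its VM and removes the mapping
 * once the last reference is gone.
 */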
void amdgpu_gem_object_close(struct drm_gem_object *obj,
                             struct drm_file *file_priv)
{
        struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
        struct amdgpu_device *adev = bo->adev;
        struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
        struct amdgpu_vm *vm = &fpriv->vm;

        struct amdgpu_bo_list_entry vm_pd;
        struct list_head list, duplicates;
        struct ttm_validate_buffer tv;
        struct ww_acquire_ctx ticket;
        struct amdgpu_bo_va *bo_va;
        int r;

        INIT_LIST_HEAD(&list);
        INIT_LIST_HEAD(&duplicates);

        tv.bo = &bo->tbo;
        tv.shared = true;
        list_add(&tv.head, &list);

        amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);

        r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
        if (r) {
                dev_err(adev->dev, "leaking bo va because we failed to reserve the bo (%d)\n",
                        r);
                return;
        }
        bo_va = amdgpu_vm_bo_find(vm, bo);
        if (bo_va) {
                if (--bo_va->ref_count == 0) {
                        amdgpu_vm_bo_rmv(adev, bo_va);
                }
        }
        ttm_eu_backoff_reservation(&ticket, &list);
}

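/**
 * amdgpu_gem_handle_lockup - handle -EDEADLK from a GEM operation
 *
 * @adev: amdgpu device
 * @r: error code to filter
 *
 * On -EDEADLK attempt a GPU reset and, if it succeeds, return -EAGAIN
 * so that user space retries the ioctl.
 */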
static int amdgpu_gem_handle_lockup(struct amdgpu_device *adev, int r)
{
        if (r == -EDEADLK) {
                r = amdgpu_gpu_reset(adev);
                if (!r)
                        r = -EAGAIN;
        }
        return r;
}

/*
 * GEM ioctls.
 */
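/*
 * Create a GEM object. Sizes for the GDS, GWS and OA domains are given
 * in block units and shifted to bytes here; all allocations are rounded
 * up to page size.
 */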
int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *filp)
{
        struct amdgpu_device *adev = dev->dev_private;
        union drm_amdgpu_gem_create *args = data;
        uint64_t size = args->in.bo_size;
        struct drm_gem_object *gobj;
        uint32_t handle;
        bool kernel = false;
        int r;

        /* create a GEM object to wrap this buffer in */
        if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS |
            AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
                kernel = true;
                if (args->in.domains == AMDGPU_GEM_DOMAIN_GDS)
                        size = size << AMDGPU_GDS_SHIFT;
                else if (args->in.domains == AMDGPU_GEM_DOMAIN_GWS)
                        size = size << AMDGPU_GWS_SHIFT;
                else if (args->in.domains == AMDGPU_GEM_DOMAIN_OA)
                        size = size << AMDGPU_OA_SHIFT;
                else {
                        r = -EINVAL;
                        goto error_unlock;
                }
        }
        size = roundup(size, PAGE_SIZE);

        r = amdgpu_gem_object_create(adev, size, args->in.alignment,
                                     (u32)(0xffffffff & args->in.domains),
                                     args->in.domain_flags,
                                     kernel, &gobj);
        if (r)
                goto error_unlock;

        r = drm_gem_handle_create(filp, gobj, &handle);
        /* drop reference from allocate - handle holds it now */
        drm_gem_object_unreference_unlocked(gobj);
        if (r)
                goto error_unlock;

        memset(args, 0, sizeof(*args));
        args->out.handle = handle;
        return 0;

error_unlock:
        r = amdgpu_gem_handle_lockup(adev, r);
        return r;
}

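/*
 * Turn a range of anonymous user memory into a GEM object. Writable
 * mappings require anonymous memory and a registered MMU notifier;
 * with AMDGPU_GEM_USERPTR_VALIDATE the user pages are additionally
 * fetched and validated into GTT up front.
 */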
int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
                             struct drm_file *filp)
{
        struct amdgpu_device *adev = dev->dev_private;
        struct drm_amdgpu_gem_userptr *args = data;
        struct drm_gem_object *gobj;
        struct amdgpu_bo *bo;
        uint32_t handle;
        int r;

        if (offset_in_page(args->addr | args->size))
                return -EINVAL;

        /* reject unknown flag values */
        if (args->flags & ~(AMDGPU_GEM_USERPTR_READONLY |
            AMDGPU_GEM_USERPTR_ANONONLY | AMDGPU_GEM_USERPTR_VALIDATE |
            AMDGPU_GEM_USERPTR_REGISTER))
                return -EINVAL;

        if (!(args->flags & AMDGPU_GEM_USERPTR_READONLY) && (
             !(args->flags & AMDGPU_GEM_USERPTR_ANONONLY) ||
             !(args->flags & AMDGPU_GEM_USERPTR_REGISTER))) {

                /* if we want to write to it we must require anonymous
                 * memory and install an MMU notifier
                 */
                return -EACCES;
        }

        /* create a GEM object to wrap this buffer in */
        r = amdgpu_gem_object_create(adev, args->size, 0,
                                     AMDGPU_GEM_DOMAIN_CPU, 0,
                                     0, &gobj);
        if (r)
                goto handle_lockup;

        bo = gem_to_amdgpu_bo(gobj);
        bo->prefered_domains = AMDGPU_GEM_DOMAIN_GTT;
        bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
        r = amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags);
        if (r)
                goto release_object;

        if (args->flags & AMDGPU_GEM_USERPTR_REGISTER) {
                r = amdgpu_mn_register(bo, args->addr);
                if (r)
                        goto release_object;
        }

        if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) {
                down_read(&current->mm->mmap_sem);

                r = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm,
                                                 bo->tbo.ttm->pages);
                if (r)
                        goto unlock_mmap_sem;

                r = amdgpu_bo_reserve(bo, true);
                if (r)
                        goto free_pages;

                amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
                r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
                amdgpu_bo_unreserve(bo);
                if (r)
                        goto free_pages;

                up_read(&current->mm->mmap_sem);
        }

        r = drm_gem_handle_create(filp, gobj, &handle);
        /* drop reference from allocate - handle holds it now */
        drm_gem_object_unreference_unlocked(gobj);
        if (r)
                goto handle_lockup;

        args->handle = handle;
        return 0;

free_pages:
        release_pages(bo->tbo.ttm->pages, bo->tbo.ttm->num_pages, false);

unlock_mmap_sem:
        up_read(&current->mm->mmap_sem);

release_object:
        drm_gem_object_unreference_unlocked(gobj);

handle_lockup:
        r = amdgpu_gem_handle_lockup(adev, r);

        return r;
}

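/*
 * Look up the fake mmap offset for a GEM handle. Not allowed for
 * userptr BOs or BOs created with AMDGPU_GEM_CREATE_NO_CPU_ACCESS.
 */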
int amdgpu_mode_dumb_mmap(struct drm_file *filp,
                          struct drm_device *dev,
                          uint32_t handle, uint64_t *offset_p)
{
        struct drm_gem_object *gobj;
        struct amdgpu_bo *robj;

        gobj = drm_gem_object_lookup(dev, filp, handle);
        if (gobj == NULL) {
                return -ENOENT;
        }
        robj = gem_to_amdgpu_bo(gobj);
        if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm) ||
            (robj->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
                drm_gem_object_unreference_unlocked(gobj);
                return -EPERM;
        }
        *offset_p = amdgpu_bo_mmap_offset(robj);
        drm_gem_object_unreference_unlocked(gobj);
        return 0;
}

int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *filp)
{
        union drm_amdgpu_gem_mmap *args = data;
        uint32_t handle = args->in.handle;

        memset(args, 0, sizeof(*args));
        return amdgpu_mode_dumb_mmap(filp, dev, handle, &args->out.addr_ptr);
}

/**
 * amdgpu_gem_timeout - calculate jiffies timeout from absolute value
 *
 * @timeout_ns: timeout in ns
 *
 * Calculate the timeout in jiffies from an absolute timeout in ns.
 */
unsigned long amdgpu_gem_timeout(uint64_t timeout_ns)
{
        unsigned long timeout_jiffies;
        ktime_t timeout;

        /* clamp timeout if it's too large */
        if (((int64_t)timeout_ns) < 0)
                return MAX_SCHEDULE_TIMEOUT;

        timeout = ktime_sub(ns_to_ktime(timeout_ns), ktime_get());
        if (ktime_to_ns(timeout) < 0)
                return 0;

        timeout_jiffies = nsecs_to_jiffies(ktime_to_ns(timeout));
        /* clamp timeout to avoid unsigned -> signed overflow */
        if (timeout_jiffies > MAX_SCHEDULE_TIMEOUT)
                return MAX_SCHEDULE_TIMEOUT - 1;

        return timeout_jiffies;
}

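/*
 * Wait for all fences on a BO to signal. A timeout of zero only tests
 * the current state; otherwise the absolute timeout is converted to
 * jiffies and waited on.
 */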
int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
                               struct drm_file *filp)
{
        struct amdgpu_device *adev = dev->dev_private;
        union drm_amdgpu_gem_wait_idle *args = data;
        struct drm_gem_object *gobj;
        struct amdgpu_bo *robj;
        uint32_t handle = args->in.handle;
        unsigned long timeout = amdgpu_gem_timeout(args->in.timeout);
        int r = 0;
        long ret;

        gobj = drm_gem_object_lookup(dev, filp, handle);
        if (gobj == NULL) {
                return -ENOENT;
        }
        robj = gem_to_amdgpu_bo(gobj);
        if (timeout == 0)
                ret = reservation_object_test_signaled_rcu(robj->tbo.resv, true);
        else
                ret = reservation_object_wait_timeout_rcu(robj->tbo.resv, true,
                                                          true, timeout);

        /* ret == 0 means not signaled,
         * ret > 0 means signaled
         * ret < 0 means interrupted before timeout
         */
        if (ret >= 0) {
                memset(args, 0, sizeof(*args));
                args->out.status = (ret == 0);
        } else
                r = ret;

        drm_gem_object_unreference_unlocked(gobj);
        r = amdgpu_gem_handle_lockup(adev, r);
        return r;
}

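/*
 * Get or set the tiling flags and the opaque metadata blob attached to
 * a BO.
 */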
int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *filp)
{
        struct drm_amdgpu_gem_metadata *args = data;
        struct drm_gem_object *gobj;
        struct amdgpu_bo *robj;
        int r = -1;

        DRM_DEBUG("%d\n", args->handle);
        gobj = drm_gem_object_lookup(dev, filp, args->handle);
        if (gobj == NULL)
                return -ENOENT;
        robj = gem_to_amdgpu_bo(gobj);

        r = amdgpu_bo_reserve(robj, false);
        if (unlikely(r != 0))
                goto out;

        if (args->op == AMDGPU_GEM_METADATA_OP_GET_METADATA) {
                amdgpu_bo_get_tiling_flags(robj, &args->data.tiling_info);
                r = amdgpu_bo_get_metadata(robj, args->data.data,
                                           sizeof(args->data.data),
                                           &args->data.data_size_bytes,
                                           &args->data.flags);
        } else if (args->op == AMDGPU_GEM_METADATA_OP_SET_METADATA) {
                if (args->data.data_size_bytes > sizeof(args->data.data)) {
                        r = -EINVAL;
                        goto unreserve;
                }
                r = amdgpu_bo_set_tiling_flags(robj, args->data.tiling_info);
                if (!r)
                        r = amdgpu_bo_set_metadata(robj, args->data.data,
                                                   args->data.data_size_bytes,
                                                   args->data.flags);
        }

unreserve:
        amdgpu_bo_unreserve(robj);
out:
        drm_gem_object_unreference_unlocked(gobj);
        return r;
}

/**
 * amdgpu_gem_va_update_vm - update the bo_va in its VM
 *
 * @adev: amdgpu_device pointer
 * @bo_va: bo_va to update
 * @operation: the VA operation that was performed
 *
 * Update the bo_va directly after setting its address. Errors are not
 * vital here, so they are not reported back to userspace.
 */
static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
                                    struct amdgpu_bo_va *bo_va, uint32_t operation)
{
        struct ttm_validate_buffer tv, *entry;
        struct amdgpu_bo_list_entry vm_pd;
        struct ww_acquire_ctx ticket;
        struct list_head list, duplicates;
        unsigned domain;
        int r;

        INIT_LIST_HEAD(&list);
        INIT_LIST_HEAD(&duplicates);

        tv.bo = &bo_va->bo->tbo;
        tv.shared = true;
        list_add(&tv.head, &list);

        amdgpu_vm_get_pd_bo(bo_va->vm, &list, &vm_pd);

        /* Provide duplicates to avoid -EALREADY */
        r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
        if (r)
                goto error_print;

        amdgpu_vm_get_pt_bos(bo_va->vm, &duplicates);
        list_for_each_entry(entry, &list, head) {
                domain = amdgpu_mem_type_to_domain(entry->bo->mem.mem_type);
                /* if anything is swapped out don't swap it in here,
                 * just abort and wait for the next CS
                 */
                if (domain == AMDGPU_GEM_DOMAIN_CPU)
                        goto error_unreserve;
        }
        list_for_each_entry(entry, &duplicates, head) {
                domain = amdgpu_mem_type_to_domain(entry->bo->mem.mem_type);
                /* if anything is swapped out don't swap it in here,
                 * just abort and wait for the next CS
                 */
                if (domain == AMDGPU_GEM_DOMAIN_CPU)
                        goto error_unreserve;
        }

        r = amdgpu_vm_update_page_directory(adev, bo_va->vm);
        if (r)
                goto error_unreserve;

        r = amdgpu_vm_clear_freed(adev, bo_va->vm);
        if (r)
                goto error_unreserve;

        if (operation == AMDGPU_VA_OP_MAP)
                r = amdgpu_vm_bo_update(adev, bo_va, &bo_va->bo->tbo.mem);

error_unreserve:
        ttm_eu_backoff_reservation(&ticket, &list);

error_print:
        if (r && r != -ERESTARTSYS)
                DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
}

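/*
 * Map or unmap a BO in the client's GPU virtual address space. Unless
 * AMDGPU_VM_DELAY_UPDATE is set (or VM debugging is enabled), the page
 * tables are updated immediately via amdgpu_gem_va_update_vm().
 */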
int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *filp)
{
        struct drm_amdgpu_gem_va *args = data;
        struct drm_gem_object *gobj;
        struct amdgpu_device *adev = dev->dev_private;
        struct amdgpu_fpriv *fpriv = filp->driver_priv;
        struct amdgpu_bo *rbo;
        struct amdgpu_bo_va *bo_va;
        struct ttm_validate_buffer tv, tv_pd;
        struct ww_acquire_ctx ticket;
        struct list_head list, duplicates;
        uint32_t invalid_flags, va_flags = 0;
        int r = 0;

        if (!adev->vm_manager.enabled)
                return -ENOTTY;

        if (args->va_address < AMDGPU_VA_RESERVED_SIZE) {
                dev_err(&dev->pdev->dev,
                        "va_address 0x%lX is in reserved area 0x%X\n",
                        (unsigned long)args->va_address,
                        AMDGPU_VA_RESERVED_SIZE);
                return -EINVAL;
        }

        invalid_flags = ~(AMDGPU_VM_DELAY_UPDATE | AMDGPU_VM_PAGE_READABLE |
                        AMDGPU_VM_PAGE_WRITEABLE | AMDGPU_VM_PAGE_EXECUTABLE);
        if ((args->flags & invalid_flags)) {
                dev_err(&dev->pdev->dev, "invalid flags 0x%08X vs 0x%08X\n",
                        args->flags, invalid_flags);
                return -EINVAL;
        }

        switch (args->operation) {
        case AMDGPU_VA_OP_MAP:
        case AMDGPU_VA_OP_UNMAP:
                break;
        default:
                dev_err(&dev->pdev->dev, "unsupported operation %d\n",
                        args->operation);
                return -EINVAL;
        }

        gobj = drm_gem_object_lookup(dev, filp, args->handle);
        if (gobj == NULL)
                return -ENOENT;
        rbo = gem_to_amdgpu_bo(gobj);
        INIT_LIST_HEAD(&list);
        INIT_LIST_HEAD(&duplicates);
        tv.bo = &rbo->tbo;
        tv.shared = true;
        list_add(&tv.head, &list);

        tv_pd.bo = &fpriv->vm.page_directory->tbo;
        tv_pd.shared = true;
        list_add(&tv_pd.head, &list);

        r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
        if (r) {
                drm_gem_object_unreference_unlocked(gobj);
                return r;
        }

        bo_va = amdgpu_vm_bo_find(&fpriv->vm, rbo);
        if (!bo_va) {
                ttm_eu_backoff_reservation(&ticket, &list);
                drm_gem_object_unreference_unlocked(gobj);
                return -ENOENT;
        }

        switch (args->operation) {
        case AMDGPU_VA_OP_MAP:
                if (args->flags & AMDGPU_VM_PAGE_READABLE)
                        va_flags |= AMDGPU_PTE_READABLE;
                if (args->flags & AMDGPU_VM_PAGE_WRITEABLE)
                        va_flags |= AMDGPU_PTE_WRITEABLE;
                if (args->flags & AMDGPU_VM_PAGE_EXECUTABLE)
                        va_flags |= AMDGPU_PTE_EXECUTABLE;
                r = amdgpu_vm_bo_map(adev, bo_va, args->va_address,
                                     args->offset_in_bo, args->map_size,
                                     va_flags);
                break;
        case AMDGPU_VA_OP_UNMAP:
                r = amdgpu_vm_bo_unmap(adev, bo_va, args->va_address);
                break;
        default:
                break;
        }
        ttm_eu_backoff_reservation(&ticket, &list);
        if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) &&
            !amdgpu_vm_debug)
                amdgpu_gem_va_update_vm(adev, bo_va, args->operation);

        drm_gem_object_unreference_unlocked(gobj);
        return r;
}

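/*
 * Return the creation parameters of a BO or change its preferred
 * placement. Changing the placement of userptr BOs is not allowed.
 */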
int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *filp)
{
        struct drm_amdgpu_gem_op *args = data;
        struct drm_gem_object *gobj;
        struct amdgpu_bo *robj;
        int r;

        gobj = drm_gem_object_lookup(dev, filp, args->handle);
        if (gobj == NULL) {
                return -ENOENT;
        }
        robj = gem_to_amdgpu_bo(gobj);

        r = amdgpu_bo_reserve(robj, false);
        if (unlikely(r))
                goto out;

        switch (args->op) {
        case AMDGPU_GEM_OP_GET_GEM_CREATE_INFO: {
                struct drm_amdgpu_gem_create_in info;
                void __user *out = (void __user *)(long)args->value;

                info.bo_size = robj->gem_base.size;
                info.alignment = robj->tbo.mem.page_alignment << PAGE_SHIFT;
                info.domains = robj->prefered_domains;
                info.domain_flags = robj->flags;
                amdgpu_bo_unreserve(robj);
                if (copy_to_user(out, &info, sizeof(info)))
                        r = -EFAULT;
                break;
        }
        case AMDGPU_GEM_OP_SET_PLACEMENT:
                if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm)) {
                        r = -EPERM;
                        amdgpu_bo_unreserve(robj);
                        break;
                }
                robj->prefered_domains = args->value & (AMDGPU_GEM_DOMAIN_VRAM |
                                                        AMDGPU_GEM_DOMAIN_GTT |
                                                        AMDGPU_GEM_DOMAIN_CPU);
                robj->allowed_domains = robj->prefered_domains;
                if (robj->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
                        robj->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;

                amdgpu_bo_unreserve(robj);
                break;
        default:
                amdgpu_bo_unreserve(robj);
                r = -EINVAL;
        }

out:
        drm_gem_object_unreference_unlocked(gobj);
        return r;
}

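/*
 * Create a dumb buffer for scanout, placed in VRAM with CPU access
 * required.
 */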
int amdgpu_mode_dumb_create(struct drm_file *file_priv,
                            struct drm_device *dev,
                            struct drm_mode_create_dumb *args)
{
        struct amdgpu_device *adev = dev->dev_private;
        struct drm_gem_object *gobj;
        uint32_t handle;
        int r;

        args->pitch = amdgpu_align_pitch(adev, args->width, args->bpp, 0) * ((args->bpp + 1) / 8);
        args->size = (u64)args->pitch * args->height;
        args->size = ALIGN(args->size, PAGE_SIZE);

        r = amdgpu_gem_object_create(adev, args->size, 0,
                                     AMDGPU_GEM_DOMAIN_VRAM,
                                     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
                                     ttm_bo_type_device,
                                     &gobj);
        if (r)
                return -ENOMEM;

        r = drm_gem_handle_create(file_priv, gobj, &handle);
        /* drop reference from allocate - handle holds it now */
        drm_gem_object_unreference_unlocked(gobj);
        if (r) {
                return r;
        }
        args->handle = handle;
        return 0;
}

#if defined(CONFIG_DEBUG_FS)
static int amdgpu_debugfs_gem_bo_info(int id, void *ptr, void *data)
{
        struct drm_gem_object *gobj = ptr;
        struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
        struct seq_file *m = data;

        unsigned domain;
        const char *placement;
        unsigned pin_count;

        domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
        switch (domain) {
        case AMDGPU_GEM_DOMAIN_VRAM:
                placement = "VRAM";
                break;
        case AMDGPU_GEM_DOMAIN_GTT:
                placement = " GTT";
                break;
        case AMDGPU_GEM_DOMAIN_CPU:
        default:
                placement = " CPU";
                break;
        }
        seq_printf(m, "\t0x%08x: %12ld byte %s @ 0x%010Lx",
                   id, amdgpu_bo_size(bo), placement,
                   amdgpu_bo_gpu_offset(bo));

        pin_count = ACCESS_ONCE(bo->pin_count);
        if (pin_count)
                seq_printf(m, " pin count %d", pin_count);
        seq_printf(m, "\n");

        return 0;
}

static int amdgpu_debugfs_gem_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *)m->private;
        struct drm_device *dev = node->minor->dev;
        struct drm_file *file;
        int r;

        r = mutex_lock_interruptible(&dev->struct_mutex);
        if (r)
                return r;

        list_for_each_entry(file, &dev->filelist, lhead) {
                struct task_struct *task;

                /*
                 * Although we have a valid reference on file->pid, that does
                 * not guarantee that the task_struct who called get_pid() is
                 * still alive (e.g. get_pid(current) => fork() => exit()).
                 * Therefore, we need to protect this ->comm access using RCU.
                 */
                rcu_read_lock();
                task = pid_task(file->pid, PIDTYPE_PID);
                seq_printf(m, "pid %8d command %s:\n", pid_nr(file->pid),
                           task ? task->comm : "<unknown>");
                rcu_read_unlock();

                spin_lock(&file->table_lock);
                idr_for_each(&file->object_idr, amdgpu_debugfs_gem_bo_info, m);
                spin_unlock(&file->table_lock);
        }

        mutex_unlock(&dev->struct_mutex);
        return 0;
}

static struct drm_info_list amdgpu_debugfs_gem_list[] = {
        {"amdgpu_gem_info", &amdgpu_debugfs_gem_info, 0, NULL},
};
#endif

int amdgpu_gem_debugfs_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
        return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_gem_list, 1);
#endif
        return 0;
}