/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/ktime.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"

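/**
 * amdgpu_gem_object_free - free an amdgpu GEM object
 * @gobj: GEM object to free
 *
 * Tears down the prime import attachment (if any), unregisters the MMU
 * notifier and drops the driver reference to the backing BO.
 */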
void amdgpu_gem_object_free(struct drm_gem_object *gobj)
{
        struct amdgpu_bo *robj = gem_to_amdgpu_bo(gobj);

        if (robj) {
                if (robj->gem_base.import_attach)
                        drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
                amdgpu_mn_unregister(robj);
                amdgpu_bo_unref(&robj);
        }
}

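/**
 * amdgpu_gem_object_create - allocate a GEM object backed by an amdgpu BO
 * @adev: amdgpu device
 * @size: requested size in bytes
 * @alignment: requested alignment, raised to at least PAGE_SIZE
 * @initial_domain: initial placement domain(s)
 * @flags: AMDGPU_GEM_CREATE_* flags
 * @kernel: true for kernel-internal allocations
 * @obj: where to store the resulting GEM object
 *
 * Failed VRAM allocations are retried with GTT added to the domain.
 * Returns 0 on success or a negative error code.
 */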
int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
                                int alignment, u32 initial_domain,
                                u64 flags, bool kernel,
                                struct drm_gem_object **obj)
{
        struct amdgpu_bo *robj;
        unsigned long max_size;
        int r;

        *obj = NULL;
        /* At least align on page size */
        if (alignment < PAGE_SIZE) {
                alignment = PAGE_SIZE;
        }

        if (!(initial_domain & (AMDGPU_GEM_DOMAIN_GDS | AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA))) {
                /* Maximum bo size is the unpinned gtt size since we use the gtt to
                 * handle vram to system pool migrations.
                 */
                max_size = adev->mc.gtt_size - adev->gart_pin_size;
                if (size > max_size) {
                        DRM_DEBUG("Allocation size %ldMb bigger than %ldMb limit\n",
                                  size >> 20, max_size >> 20);
                        return -ENOMEM;
                }
        }
retry:
        r = amdgpu_bo_create(adev, size, alignment, kernel, initial_domain,
                             flags, NULL, NULL, &robj);
        if (r) {
                if (r != -ERESTARTSYS) {
                        if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
                                initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
                                goto retry;
                        }
                        DRM_ERROR("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
                                  size, initial_domain, alignment, r);
                }
                return r;
        }
        *obj = &robj->gem_base;

        return 0;
}

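/**
 * amdgpu_gem_force_release - drop all GEM handles still held by clients
 * @adev: amdgpu device
 *
 * Walks every open DRM file, releases the GEM objects left in its handle
 * table and destroys the idr. Warns if any user space clients or
 * allocations are still active.
 */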
void amdgpu_gem_force_release(struct amdgpu_device *adev)
{
        struct drm_device *ddev = adev->ddev;
        struct drm_file *file;

        mutex_lock(&ddev->struct_mutex);

        list_for_each_entry(file, &ddev->filelist, lhead) {
                struct drm_gem_object *gobj;
                int handle;

                WARN_ONCE(1, "Still active user space clients!\n");
                spin_lock(&file->table_lock);
                idr_for_each_entry(&file->object_idr, gobj, handle) {
                        WARN_ONCE(1, "And also active allocations!\n");
                        drm_gem_object_unreference(gobj);
                }
                idr_destroy(&file->object_idr);
                spin_unlock(&file->table_lock);
        }

        mutex_unlock(&ddev->struct_mutex);
}

/*
 * Called from drm_gem_handle_create(), which appears in both the new and
 * open ioctl paths.
 */
int amdgpu_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
        struct amdgpu_bo *rbo = gem_to_amdgpu_bo(obj);
        struct amdgpu_device *adev = rbo->adev;
        struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
        struct amdgpu_vm *vm = &fpriv->vm;
        struct amdgpu_bo_va *bo_va;
        int r;

        r = amdgpu_bo_reserve(rbo, false);
        if (r)
                return r;

        bo_va = amdgpu_vm_bo_find(vm, rbo);
        if (!bo_va) {
                bo_va = amdgpu_vm_bo_add(adev, vm, rbo);
        } else {
                ++bo_va->ref_count;
        }
        amdgpu_bo_unreserve(rbo);
        return 0;
}

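/**
 * amdgpu_gem_object_close - a GEM handle to the object is being closed
 * @obj: GEM object the handle refers to
 * @file_priv: DRM file of the client closing the handle
 *
 * Drops the bo_va reference taken in amdgpu_gem_object_open() and removes
 * the bo_va from the VM once the last reference is gone.
 */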
void amdgpu_gem_object_close(struct drm_gem_object *obj,
                             struct drm_file *file_priv)
{
        struct amdgpu_bo *rbo = gem_to_amdgpu_bo(obj);
        struct amdgpu_device *adev = rbo->adev;
        struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
        struct amdgpu_vm *vm = &fpriv->vm;
        struct amdgpu_bo_va *bo_va;
        int r;

        r = amdgpu_bo_reserve(rbo, true);
        if (r) {
                dev_err(adev->dev, "leaking bo va because "
                        "we failed to reserve the bo (%d)\n", r);
                return;
        }
        bo_va = amdgpu_vm_bo_find(vm, rbo);
        if (bo_va) {
                if (--bo_va->ref_count == 0) {
                        amdgpu_vm_bo_rmv(adev, bo_va);
                }
        }
        amdgpu_bo_unreserve(rbo);
}

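/**
 * amdgpu_gem_handle_lockup - turn a lockup error into a GPU reset
 * @adev: amdgpu device
 * @r: error code returned by the previous operation
 *
 * On -EDEADLK the GPU is reset and -EAGAIN is returned so the caller can
 * retry; all other error codes are passed through unchanged.
 */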
static int amdgpu_gem_handle_lockup(struct amdgpu_device *adev, int r)
{
        if (r == -EDEADLK) {
                r = amdgpu_gpu_reset(adev);
                if (!r)
                        r = -EAGAIN;
        }
        return r;
}

/*
 * GEM ioctls.
 */
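/**
 * amdgpu_gem_create_ioctl - create a GEM object on behalf of userspace
 *
 * GDS, GWS and OA sizes are shifted to bytes using the domain-specific
 * shift; the size is then rounded up to a whole page, the BO is allocated
 * and a handle to it is returned in args->out.handle.
 */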
int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *filp)
{
        struct amdgpu_device *adev = dev->dev_private;
        union drm_amdgpu_gem_create *args = data;
        uint64_t size = args->in.bo_size;
        struct drm_gem_object *gobj;
        uint32_t handle;
        bool kernel = false;
        int r;

        /* create a gem object to contain this object in */
        if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS |
            AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
                kernel = true;
                if (args->in.domains == AMDGPU_GEM_DOMAIN_GDS)
                        size = size << AMDGPU_GDS_SHIFT;
                else if (args->in.domains == AMDGPU_GEM_DOMAIN_GWS)
                        size = size << AMDGPU_GWS_SHIFT;
                else if (args->in.domains == AMDGPU_GEM_DOMAIN_OA)
                        size = size << AMDGPU_OA_SHIFT;
                else {
                        r = -EINVAL;
                        goto error_unlock;
                }
        }
        size = roundup(size, PAGE_SIZE);

        r = amdgpu_gem_object_create(adev, size, args->in.alignment,
                                     (u32)(0xffffffff & args->in.domains),
                                     args->in.domain_flags,
                                     kernel, &gobj);
        if (r)
                goto error_unlock;

        r = drm_gem_handle_create(filp, gobj, &handle);
        /* drop reference from allocate - handle holds it now */
        drm_gem_object_unreference_unlocked(gobj);
        if (r)
                goto error_unlock;

        memset(args, 0, sizeof(*args));
        args->out.handle = handle;
        return 0;

error_unlock:
        r = amdgpu_gem_handle_lockup(adev, r);
        return r;
}

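/**
 * amdgpu_gem_userptr_ioctl - create a GEM object backed by user memory
 *
 * Checks the flags (writable mappings require anonymous memory and a
 * registered MMU notifier), wraps the user pages in a GTT-only BO,
 * optionally validates it right away and returns a handle to it.
 */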
int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
                             struct drm_file *filp)
{
        struct amdgpu_device *adev = dev->dev_private;
        struct drm_amdgpu_gem_userptr *args = data;
        struct drm_gem_object *gobj;
        struct amdgpu_bo *bo;
        uint32_t handle;
        int r;

        if (offset_in_page(args->addr | args->size))
                return -EINVAL;

        /* reject unknown flag values */
        if (args->flags & ~(AMDGPU_GEM_USERPTR_READONLY |
            AMDGPU_GEM_USERPTR_ANONONLY | AMDGPU_GEM_USERPTR_VALIDATE |
            AMDGPU_GEM_USERPTR_REGISTER))
                return -EINVAL;

        if (!(args->flags & AMDGPU_GEM_USERPTR_READONLY) && (
             !(args->flags & AMDGPU_GEM_USERPTR_ANONONLY) ||
             !(args->flags & AMDGPU_GEM_USERPTR_REGISTER))) {

                /* if we want to write to it we must require anonymous
                 * memory and install an MMU notifier
                 */
                return -EACCES;
        }

        /* create a gem object to contain this object in */
        r = amdgpu_gem_object_create(adev, args->size, 0,
                                     AMDGPU_GEM_DOMAIN_CPU, 0,
                                     0, &gobj);
        if (r)
                goto handle_lockup;

        bo = gem_to_amdgpu_bo(gobj);
        bo->prefered_domains = AMDGPU_GEM_DOMAIN_GTT;
        bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
        r = amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags);
        if (r)
                goto release_object;

        if (args->flags & AMDGPU_GEM_USERPTR_REGISTER) {
                r = amdgpu_mn_register(bo, args->addr);
                if (r)
                        goto release_object;
        }

        if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) {
                down_read(&current->mm->mmap_sem);
                r = amdgpu_bo_reserve(bo, true);
                if (r) {
                        up_read(&current->mm->mmap_sem);
                        goto release_object;
                }

                amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
                r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
                amdgpu_bo_unreserve(bo);
                up_read(&current->mm->mmap_sem);
                if (r)
                        goto release_object;
        }

        r = drm_gem_handle_create(filp, gobj, &handle);
        /* drop reference from allocate - handle holds it now */
        drm_gem_object_unreference_unlocked(gobj);
        if (r)
                goto handle_lockup;

        args->handle = handle;
        return 0;

release_object:
        drm_gem_object_unreference_unlocked(gobj);

handle_lockup:
        r = amdgpu_gem_handle_lockup(adev, r);

        return r;
}

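/**
 * amdgpu_mode_dumb_mmap - look up the fake mmap offset of a BO
 *
 * Userptr BOs and BOs created with AMDGPU_GEM_CREATE_NO_CPU_ACCESS are
 * rejected with -EPERM; otherwise the BO's mmap offset is returned in
 * @offset_p.
 */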
int amdgpu_mode_dumb_mmap(struct drm_file *filp,
                          struct drm_device *dev,
                          uint32_t handle, uint64_t *offset_p)
{
        struct drm_gem_object *gobj;
        struct amdgpu_bo *robj;

        gobj = drm_gem_object_lookup(dev, filp, handle);
        if (gobj == NULL) {
                return -ENOENT;
        }
        robj = gem_to_amdgpu_bo(gobj);
        if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm) ||
            (robj->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
                drm_gem_object_unreference_unlocked(gobj);
                return -EPERM;
        }
        *offset_p = amdgpu_bo_mmap_offset(robj);
        drm_gem_object_unreference_unlocked(gobj);
        return 0;
}

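/**
 * amdgpu_gem_mmap_ioctl - return the mmap offset for a GEM handle
 *
 * Thin ioctl wrapper around amdgpu_mode_dumb_mmap().
 */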
int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *filp)
{
        union drm_amdgpu_gem_mmap *args = data;
        uint32_t handle = args->in.handle;

        memset(args, 0, sizeof(*args));
        return amdgpu_mode_dumb_mmap(filp, dev, handle, &args->out.addr_ptr);
}

/**
 * amdgpu_gem_timeout - calculate jiffies timeout from absolute value
 *
 * @timeout_ns: timeout in ns
 *
 * Calculate the timeout in jiffies from an absolute timeout in ns.
 */
unsigned long amdgpu_gem_timeout(uint64_t timeout_ns)
{
        unsigned long timeout_jiffies;
        ktime_t timeout;

        /* clamp timeout if it's too large */
        if (((int64_t)timeout_ns) < 0)
                return MAX_SCHEDULE_TIMEOUT;

        timeout = ktime_sub(ns_to_ktime(timeout_ns), ktime_get());
        if (ktime_to_ns(timeout) < 0)
                return 0;

        timeout_jiffies = nsecs_to_jiffies(ktime_to_ns(timeout));
        /* clamp timeout to avoid unsigned -> signed overflow */
        if (timeout_jiffies > MAX_SCHEDULE_TIMEOUT)
                return MAX_SCHEDULE_TIMEOUT - 1;

        return timeout_jiffies;
}

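/**
 * amdgpu_gem_wait_idle_ioctl - wait for all fences on a BO to signal
 *
 * Waits on the BO's reservation object for at most the requested timeout;
 * args->out.status is set to non-zero if the BO is still busy when the
 * wait ends.
 */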
int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *filp)
{
        struct amdgpu_device *adev = dev->dev_private;
        union drm_amdgpu_gem_wait_idle *args = data;
        struct drm_gem_object *gobj;
        struct amdgpu_bo *robj;
        uint32_t handle = args->in.handle;
        unsigned long timeout = amdgpu_gem_timeout(args->in.timeout);
        int r = 0;
        long ret;

        gobj = drm_gem_object_lookup(dev, filp, handle);
        if (gobj == NULL) {
                return -ENOENT;
        }
        robj = gem_to_amdgpu_bo(gobj);
        if (timeout == 0)
                ret = reservation_object_test_signaled_rcu(robj->tbo.resv, true);
        else
                ret = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true, timeout);

        /* ret == 0 means not signaled,
         * ret > 0 means signaled
         * ret < 0 means interrupted before timeout
         */
        if (ret >= 0) {
                memset(args, 0, sizeof(*args));
                args->out.status = (ret == 0);
        } else
                r = ret;

        drm_gem_object_unreference_unlocked(gobj);
        r = amdgpu_gem_handle_lockup(adev, r);
        return r;
}

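/**
 * amdgpu_gem_metadata_ioctl - get or set tiling flags and metadata of a BO
 *
 * GET returns the tiling info and the opaque metadata blob; SET stores
 * them after checking that the blob fits into the ioctl argument.
 */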
int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *filp)
{
        struct drm_amdgpu_gem_metadata *args = data;
        struct drm_gem_object *gobj;
        struct amdgpu_bo *robj;
        int r = -1;

        DRM_DEBUG("%d \n", args->handle);
        gobj = drm_gem_object_lookup(dev, filp, args->handle);
        if (gobj == NULL)
                return -ENOENT;
        robj = gem_to_amdgpu_bo(gobj);

        r = amdgpu_bo_reserve(robj, false);
        if (unlikely(r != 0))
                goto out;

        if (args->op == AMDGPU_GEM_METADATA_OP_GET_METADATA) {
                amdgpu_bo_get_tiling_flags(robj, &args->data.tiling_info);
                r = amdgpu_bo_get_metadata(robj, args->data.data,
                                           sizeof(args->data.data),
                                           &args->data.data_size_bytes,
                                           &args->data.flags);
        } else if (args->op == AMDGPU_GEM_METADATA_OP_SET_METADATA) {
                if (args->data.data_size_bytes > sizeof(args->data.data)) {
                        r = -EINVAL;
                        goto unreserve;
                }
                r = amdgpu_bo_set_tiling_flags(robj, args->data.tiling_info);
                if (!r)
                        r = amdgpu_bo_set_metadata(robj, args->data.data,
                                                   args->data.data_size_bytes,
                                                   args->data.flags);
        }

unreserve:
        amdgpu_bo_unreserve(robj);
out:
        drm_gem_object_unreference_unlocked(gobj);
        return r;
}

/**
 * amdgpu_gem_va_update_vm - update the bo_va in its VM
 *
 * @adev: amdgpu_device pointer
 * @bo_va: bo_va to update
 * @operation: VA operation that was performed (map or unmap)
 *
 * Update the bo_va directly after setting its address. Errors are not
 * vital here, so they are not reported back to userspace.
 */
static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
                                    struct amdgpu_bo_va *bo_va, uint32_t operation)
{
        struct ttm_validate_buffer tv, *entry;
        struct amdgpu_bo_list_entry vm_pd;
        struct ww_acquire_ctx ticket;
        struct list_head list, duplicates;
        unsigned domain;
        int r;

        INIT_LIST_HEAD(&list);
        INIT_LIST_HEAD(&duplicates);

        tv.bo = &bo_va->bo->tbo;
        tv.shared = true;
        list_add(&tv.head, &list);

        amdgpu_vm_get_pd_bo(bo_va->vm, &list, &vm_pd);

        /* Provide duplicates to avoid -EALREADY */
        r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
        if (r)
                goto error_print;

        amdgpu_vm_get_pt_bos(bo_va->vm, &duplicates);
        list_for_each_entry(entry, &list, head) {
                domain = amdgpu_mem_type_to_domain(entry->bo->mem.mem_type);
                /* if anything is swapped out don't swap it in here,
                 * just abort and wait for the next CS
                 */
                if (domain == AMDGPU_GEM_DOMAIN_CPU)
                        goto error_unreserve;
        }
        list_for_each_entry(entry, &duplicates, head) {
                domain = amdgpu_mem_type_to_domain(entry->bo->mem.mem_type);
                /* if anything is swapped out don't swap it in here,
                 * just abort and wait for the next CS
                 */
                if (domain == AMDGPU_GEM_DOMAIN_CPU)
                        goto error_unreserve;
        }

        r = amdgpu_vm_update_page_directory(adev, bo_va->vm);
        if (r)
                goto error_unreserve;

        r = amdgpu_vm_clear_freed(adev, bo_va->vm);
        if (r)
                goto error_unreserve;

        if (operation == AMDGPU_VA_OP_MAP)
                r = amdgpu_vm_bo_update(adev, bo_va, &bo_va->bo->tbo.mem);

error_unreserve:
        ttm_eu_backoff_reservation(&ticket, &list);

error_print:
        if (r && r != -ERESTARTSYS)
                DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
}

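/**
 * amdgpu_gem_va_ioctl - map or unmap a BO in the client's GPU VM
 *
 * Validates the VA range and flags, reserves the BO (plus the page
 * directory for map operations), performs the map or unmap and, unless a
 * delayed update was requested, updates the VM right away.
 */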
int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *filp)
{
        struct drm_amdgpu_gem_va *args = data;
        struct drm_gem_object *gobj;
        struct amdgpu_device *adev = dev->dev_private;
        struct amdgpu_fpriv *fpriv = filp->driver_priv;
        struct amdgpu_bo *rbo;
        struct amdgpu_bo_va *bo_va;
        struct ttm_validate_buffer tv, tv_pd;
        struct ww_acquire_ctx ticket;
        struct list_head list, duplicates;
        uint32_t invalid_flags, va_flags = 0;
        int r = 0;

        if (!adev->vm_manager.enabled)
                return -ENOTTY;

        if (args->va_address < AMDGPU_VA_RESERVED_SIZE) {
                dev_err(&dev->pdev->dev,
                        "va_address 0x%lX is in reserved area 0x%X\n",
                        (unsigned long)args->va_address,
                        AMDGPU_VA_RESERVED_SIZE);
                return -EINVAL;
        }

        invalid_flags = ~(AMDGPU_VM_DELAY_UPDATE | AMDGPU_VM_PAGE_READABLE |
                        AMDGPU_VM_PAGE_WRITEABLE | AMDGPU_VM_PAGE_EXECUTABLE);
        if ((args->flags & invalid_flags)) {
                dev_err(&dev->pdev->dev, "invalid flags 0x%08X vs 0x%08X\n",
                        args->flags, invalid_flags);
                return -EINVAL;
        }

        switch (args->operation) {
        case AMDGPU_VA_OP_MAP:
        case AMDGPU_VA_OP_UNMAP:
                break;
        default:
                dev_err(&dev->pdev->dev, "unsupported operation %d\n",
                        args->operation);
                return -EINVAL;
        }

        gobj = drm_gem_object_lookup(dev, filp, args->handle);
        if (gobj == NULL)
                return -ENOENT;
        rbo = gem_to_amdgpu_bo(gobj);
        INIT_LIST_HEAD(&list);
        INIT_LIST_HEAD(&duplicates);
        tv.bo = &rbo->tbo;
        tv.shared = true;
        list_add(&tv.head, &list);

        if (args->operation == AMDGPU_VA_OP_MAP) {
                tv_pd.bo = &fpriv->vm.page_directory->tbo;
                tv_pd.shared = true;
                list_add(&tv_pd.head, &list);
        }
        r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
        if (r) {
                drm_gem_object_unreference_unlocked(gobj);
                return r;
        }

        bo_va = amdgpu_vm_bo_find(&fpriv->vm, rbo);
        if (!bo_va) {
                ttm_eu_backoff_reservation(&ticket, &list);
                drm_gem_object_unreference_unlocked(gobj);
                return -ENOENT;
        }

        switch (args->operation) {
        case AMDGPU_VA_OP_MAP:
                if (args->flags & AMDGPU_VM_PAGE_READABLE)
                        va_flags |= AMDGPU_PTE_READABLE;
                if (args->flags & AMDGPU_VM_PAGE_WRITEABLE)
                        va_flags |= AMDGPU_PTE_WRITEABLE;
                if (args->flags & AMDGPU_VM_PAGE_EXECUTABLE)
                        va_flags |= AMDGPU_PTE_EXECUTABLE;
                r = amdgpu_vm_bo_map(adev, bo_va, args->va_address,
                                     args->offset_in_bo, args->map_size,
                                     va_flags);
                break;
        case AMDGPU_VA_OP_UNMAP:
                r = amdgpu_vm_bo_unmap(adev, bo_va, args->va_address);
                break;
        default:
                break;
        }
        ttm_eu_backoff_reservation(&ticket, &list);
        if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) &&
            !amdgpu_vm_debug)
                amdgpu_gem_va_update_vm(adev, bo_va, args->operation);

        drm_gem_object_unreference_unlocked(gobj);
        return r;
}

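/**
 * amdgpu_gem_op_ioctl - query or change the placement parameters of a BO
 *
 * GET_GEM_CREATE_INFO copies the BO's size, alignment, domains and flags
 * to userspace; SET_PLACEMENT updates the preferred domains, which is not
 * allowed for userptr BOs.
 */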
int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *filp)
{
        struct drm_amdgpu_gem_op *args = data;
        struct drm_gem_object *gobj;
        struct amdgpu_bo *robj;
        int r;

        gobj = drm_gem_object_lookup(dev, filp, args->handle);
        if (gobj == NULL) {
                return -ENOENT;
        }
        robj = gem_to_amdgpu_bo(gobj);

        r = amdgpu_bo_reserve(robj, false);
        if (unlikely(r))
                goto out;

        switch (args->op) {
        case AMDGPU_GEM_OP_GET_GEM_CREATE_INFO: {
                struct drm_amdgpu_gem_create_in info;
                void __user *out = (void __user *)(long)args->value;

                info.bo_size = robj->gem_base.size;
                info.alignment = robj->tbo.mem.page_alignment << PAGE_SHIFT;
                info.domains = robj->prefered_domains;
                info.domain_flags = robj->flags;
                amdgpu_bo_unreserve(robj);
                if (copy_to_user(out, &info, sizeof(info)))
                        r = -EFAULT;
                break;
        }
        case AMDGPU_GEM_OP_SET_PLACEMENT:
                if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm)) {
                        r = -EPERM;
                        amdgpu_bo_unreserve(robj);
                        break;
                }
                robj->prefered_domains = args->value & (AMDGPU_GEM_DOMAIN_VRAM |
                                                        AMDGPU_GEM_DOMAIN_GTT |
                                                        AMDGPU_GEM_DOMAIN_CPU);
                robj->allowed_domains = robj->prefered_domains;
                if (robj->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
                        robj->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;

                amdgpu_bo_unreserve(robj);
                break;
        default:
                amdgpu_bo_unreserve(robj);
                r = -EINVAL;
        }

out:
        drm_gem_object_unreference_unlocked(gobj);
        return r;
}

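/**
 * amdgpu_mode_dumb_create - create a dumb buffer suitable for scanout
 *
 * Computes pitch and size from the requested width, height and bpp,
 * allocates a CPU-accessible VRAM BO and returns its handle.
 */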
int amdgpu_mode_dumb_create(struct drm_file *file_priv,
                            struct drm_device *dev,
                            struct drm_mode_create_dumb *args)
{
        struct amdgpu_device *adev = dev->dev_private;
        struct drm_gem_object *gobj;
        uint32_t handle;
        int r;

        args->pitch = amdgpu_align_pitch(adev, args->width, args->bpp, 0) * ((args->bpp + 1) / 8);
        args->size = (u64)args->pitch * args->height;
        args->size = ALIGN(args->size, PAGE_SIZE);

        r = amdgpu_gem_object_create(adev, args->size, 0,
                                     AMDGPU_GEM_DOMAIN_VRAM,
                                     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
                                     ttm_bo_type_device,
                                     &gobj);
        if (r)
                return -ENOMEM;

        r = drm_gem_handle_create(file_priv, gobj, &handle);
        /* drop reference from allocate - handle holds it now */
        drm_gem_object_unreference_unlocked(gobj);
        if (r) {
                return r;
        }
        args->handle = handle;
        return 0;
}

#if defined(CONFIG_DEBUG_FS)
static int amdgpu_debugfs_gem_bo_info(int id, void *ptr, void *data)
{
        struct drm_gem_object *gobj = ptr;
        struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
        struct seq_file *m = data;

        unsigned domain;
        const char *placement;
        unsigned pin_count;

        domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
        switch (domain) {
        case AMDGPU_GEM_DOMAIN_VRAM:
                placement = "VRAM";
                break;
        case AMDGPU_GEM_DOMAIN_GTT:
                placement = " GTT";
                break;
        case AMDGPU_GEM_DOMAIN_CPU:
        default:
                placement = " CPU";
                break;
        }
        seq_printf(m, "\t0x%08x: %12ld byte %s @ 0x%010Lx",
                   id, amdgpu_bo_size(bo), placement,
                   amdgpu_bo_gpu_offset(bo));

        pin_count = ACCESS_ONCE(bo->pin_count);
        if (pin_count)
                seq_printf(m, " pin count %d", pin_count);
        seq_printf(m, "\n");

        return 0;
}

static int amdgpu_debugfs_gem_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *)m->private;
        struct drm_device *dev = node->minor->dev;
        struct drm_file *file;
        int r;

        r = mutex_lock_interruptible(&dev->struct_mutex);
        if (r)
                return r;

        list_for_each_entry(file, &dev->filelist, lhead) {
                struct task_struct *task;

                /*
                 * Although we have a valid reference on file->pid, that does
                 * not guarantee that the task_struct who called get_pid() is
                 * still alive (e.g. get_pid(current) => fork() => exit()).
                 * Therefore, we need to protect this ->comm access using RCU.
                 */
                rcu_read_lock();
                task = pid_task(file->pid, PIDTYPE_PID);
                seq_printf(m, "pid %8d command %s:\n", pid_nr(file->pid),
                           task ? task->comm : "<unknown>");
                rcu_read_unlock();

                spin_lock(&file->table_lock);
                idr_for_each(&file->object_idr, amdgpu_debugfs_gem_bo_info, m);
                spin_unlock(&file->table_lock);
        }

        mutex_unlock(&dev->struct_mutex);
        return 0;
}

static struct drm_info_list amdgpu_debugfs_gem_list[] = {
        {"amdgpu_gem_info", &amdgpu_debugfs_gem_info, 0, NULL},
};
#endif

int amdgpu_gem_debugfs_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
        return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_gem_list, 1);
#endif
        return 0;
}