/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/iosys-map.h>
#include <linux/pci.h>

#include <drm/drm_device.h>
#include <drm/drm_file.h>
#include <drm/drm_gem_ttm_helper.h>
#include <drm/radeon_drm.h>

#include "radeon.h"
#include "radeon_prime.h"
struct dma_buf *radeon_gem_prime_export(struct drm_gem_object *gobj,
					int flags);
struct sg_table *radeon_gem_prime_get_sg_table(struct drm_gem_object *obj);
int radeon_gem_prime_pin(struct drm_gem_object *obj);
void radeon_gem_prime_unpin(struct drm_gem_object *obj);

const struct drm_gem_object_funcs radeon_gem_object_funcs;
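
/*
 * Fault handler for mmap()ed GEM objects. The read side of pm.mclk_lock is
 * held across the fault so that a concurrent memory-clock change cannot
 * migrate the buffer while its pages are being inserted; the actual work is
 * done by the generic TTM fault helpers.
 */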
static vm_fault_t radeon_gem_fault(struct vm_fault *vmf)
{
	struct ttm_buffer_object *bo = vmf->vma->vm_private_data;
	struct radeon_device *rdev = radeon_get_rdev(bo->bdev);
	vm_fault_t ret;

	down_read(&rdev->pm.mclk_lock);

	ret = ttm_bo_vm_reserve(bo, vmf);
	if (ret)
		goto unlock_mclk;

	ret = radeon_bo_fault_reserve_notify(bo);
	if (ret)
		goto unlock_resv;

	ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
				       TTM_BO_VM_NUM_PREFAULT);
	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
		goto unlock_mclk;

unlock_resv:
	dma_resv_unlock(bo->base.resv);

unlock_mclk:
	up_read(&rdev->pm.mclk_lock);
	return ret;
}
static const struct vm_operations_struct radeon_gem_vm_ops = {
	.fault = radeon_gem_fault,
	.open = ttm_bo_vm_open,
	.close = ttm_bo_vm_close,
	.access = ttm_bo_vm_access
};
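
/*
 * Final unref of a GEM object: tear down any MMU notifier registration and
 * drop the underlying TTM buffer object.
 */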
static void radeon_gem_object_free(struct drm_gem_object *gobj)
{
	struct radeon_bo *robj = gem_to_radeon_bo(gobj);

	if (robj) {
		radeon_mn_unregister(robj);
		radeon_bo_unref(&robj);
	}
}
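
/*
 * Allocate a radeon_bo backing a new GEM object and track it on the
 * device's global object list. A failed VRAM allocation transparently
 * falls back to GTT and is retried.
 */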
int radeon_gem_object_create(struct radeon_device *rdev, unsigned long size,
			     int alignment, int initial_domain,
			     u32 flags, bool kernel,
			     struct drm_gem_object **obj)
{
	struct radeon_bo *robj;
	unsigned long max_size;
	int r;

	*obj = NULL;
	/* At least align on page size */
	if (alignment < PAGE_SIZE) {
		alignment = PAGE_SIZE;
	}

	/* Maximum bo size is the unpinned gtt size since we use the gtt to
	 * handle vram to system pool migrations.
	 */
	max_size = rdev->mc.gtt_size - rdev->gart_pin_size;
	if (size > max_size) {
		DRM_DEBUG("Allocation size %ldMb bigger than %ldMb limit\n",
			  size >> 20, max_size >> 20);
		return -ENOMEM;
	}

retry:
	r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain,
			     flags, NULL, NULL, &robj);
	if (r) {
		if (r != -ERESTARTSYS) {
			if (initial_domain == RADEON_GEM_DOMAIN_VRAM) {
				initial_domain |= RADEON_GEM_DOMAIN_GTT;
				goto retry;
			}
			DRM_ERROR("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
				  size, initial_domain, alignment, r);
		}
		return r;
	}
	*obj = &robj->tbo.base;
	(*obj)->funcs = &radeon_gem_object_funcs;
	robj->pid = task_pid_nr(current);

	mutex_lock(&rdev->gem.mutex);
	list_add_tail(&robj->list, &rdev->gem.objects);
	mutex_unlock(&rdev->gem.mutex);

	return 0;
}
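
/*
 * Resolve the ioctl's read/write domains into a single target domain.
 * Requesting CPU access only waits for the BO to go idle; no migration is
 * performed here.
 */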
static int radeon_gem_set_domain(struct drm_gem_object *gobj,
				 uint32_t rdomain, uint32_t wdomain)
{
	struct radeon_bo *robj;
	uint32_t domain;
	long r;

	/* FIXME: reimplement */
	robj = gem_to_radeon_bo(gobj);
	/* work out where to validate the buffer to */
	domain = wdomain;
	if (!domain) {
		domain = rdomain;
	}
	if (!domain) {
		/* Do nothing */
		pr_warn("Set domain without domain!\n");
		return 0;
	}
	if (domain == RADEON_GEM_DOMAIN_CPU) {
		/* Asking for cpu access wait for object idle */
		r = dma_resv_wait_timeout(robj->tbo.base.resv,
					  DMA_RESV_USAGE_BOOKKEEP,
					  true, 30 * HZ);
		if (!r)
			r = -EBUSY;

		if (r < 0 && r != -EINTR) {
			pr_err("Failed to wait for object: %li\n", r);
			return r;
		}
	}
	if (domain == RADEON_GEM_DOMAIN_VRAM && robj->prime_shared_count) {
		/* A BO that is associated with a dma-buf cannot be sensibly migrated to VRAM */
		return -EINVAL;
	}
	return 0;
}
int radeon_gem_init(struct radeon_device *rdev)
{
	INIT_LIST_HEAD(&rdev->gem.objects);
	return 0;
}

void radeon_gem_fini(struct radeon_device *rdev)
{
	radeon_bo_force_delete(rdev);
}
/*
 * Called from drm_gem_handle_create(), which runs in both the new and the
 * open ioctl cases.
 */
static int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = rbo->rdev;
	struct radeon_fpriv *fpriv = file_priv->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	struct radeon_bo_va *bo_va;
	int r;

	if ((rdev->family < CHIP_CAYMAN) ||
	    (!rdev->accel_working)) {
		return 0;
	}

	r = radeon_bo_reserve(rbo, false);
	if (r) {
		return r;
	}

	bo_va = radeon_vm_bo_find(vm, rbo);
	if (!bo_va) {
		bo_va = radeon_vm_bo_add(rdev, vm, rbo);
	} else {
		++bo_va->ref_count;
	}
	radeon_bo_unreserve(rbo);

	return 0;
}
static void radeon_gem_object_close(struct drm_gem_object *obj,
				    struct drm_file *file_priv)
{
	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = rbo->rdev;
	struct radeon_fpriv *fpriv = file_priv->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	struct radeon_bo_va *bo_va;
	int r;

	if ((rdev->family < CHIP_CAYMAN) ||
	    (!rdev->accel_working)) {
		return;
	}

	r = radeon_bo_reserve(rbo, true);
	if (r) {
		dev_err(rdev->dev, "leaking bo va because we fail to reserve bo (%d)\n", r);
		return;
	}
	bo_va = radeon_vm_bo_find(vm, rbo);
	if (bo_va) {
		if (--bo_va->ref_count == 0) {
			radeon_vm_bo_rmv(rdev, bo_va);
		}
	}
	radeon_bo_unreserve(rbo);
}
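
/*
 * Turn a -EDEADLK reported by a GPU lockup into a reset, asking userspace
 * to retry the ioctl with -EAGAIN if the reset succeeds.
 */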
static int radeon_gem_handle_lockup(struct radeon_device *rdev, int r)
{
	if (r == -EDEADLK) {
		r = radeon_gpu_reset(rdev);
		if (!r)
			r = -EAGAIN;
	}
	return r;
}
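
/*
 * mmap is forbidden for userptr BOs, whose pages already belong to a
 * userspace mapping; everything else goes through the GEM TTM helper.
 */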
static int radeon_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct radeon_bo *bo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = radeon_get_rdev(bo->tbo.bdev);

	if (radeon_ttm_tt_has_userptr(rdev, bo->tbo.ttm))
		return -EPERM;

	return drm_gem_ttm_mmap(obj, vma);
}
const struct drm_gem_object_funcs radeon_gem_object_funcs = {
	.free = radeon_gem_object_free,
	.open = radeon_gem_object_open,
	.close = radeon_gem_object_close,
	.export = radeon_gem_prime_export,
	.pin = radeon_gem_prime_pin,
	.unpin = radeon_gem_prime_unpin,
	.get_sg_table = radeon_gem_prime_get_sg_table,
	.vmap = drm_gem_ttm_vmap,
	.vunmap = drm_gem_ttm_vunmap,
	.mmap = radeon_gem_object_mmap,
	.vm_ops = &radeon_gem_vm_ops,
};
/*
 * GEM ioctls.
 */
int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_info *args = data;
	struct ttm_resource_manager *man;

	man = ttm_manager_type(&rdev->mman.bdev, TTM_PL_VRAM);

	args->vram_size = (u64)man->size << PAGE_SHIFT;
	args->vram_visible = rdev->mc.visible_vram_size;
	args->vram_visible -= rdev->vram_pin_size;
	args->gart_size = rdev->mc.gtt_size;
	args->gart_size -= rdev->gart_pin_size;
	return 0;
}
int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}

int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}
int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_create *args = data;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	down_read(&rdev->exclusive_lock);
	/* create a gem object to contain this object in */
	args->size = roundup(args->size, PAGE_SIZE);
	r = radeon_gem_object_create(rdev, args->size, args->alignment,
				     args->initial_domain, args->flags,
				     false, &gobj);
	if (r) {
		up_read(&rdev->exclusive_lock);
		r = radeon_gem_handle_lockup(rdev, r);
		return r;
	}
	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(gobj);
	if (r) {
		up_read(&rdev->exclusive_lock);
		r = radeon_gem_handle_lockup(rdev, r);
		return r;
	}
	args->handle = handle;
	up_read(&rdev->exclusive_lock);
	return 0;
}
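
/*
 * Wrap an arbitrary userspace address range in a GEM object. Depending on
 * the flags, the pages may be read-only, restricted to anonymous memory,
 * tracked with an MMU notifier and/or validated into the GTT up front.
 */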
int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *filp)
{
	struct ttm_operation_ctx ctx = { true, false };
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_userptr *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *bo;
	uint32_t handle;
	int r;

	args->addr = untagged_addr(args->addr);

	if (offset_in_page(args->addr | args->size))
		return -EINVAL;

	/* reject unknown flag values */
	if (args->flags & ~(RADEON_GEM_USERPTR_READONLY |
	    RADEON_GEM_USERPTR_ANONONLY | RADEON_GEM_USERPTR_VALIDATE |
	    RADEON_GEM_USERPTR_REGISTER))
		return -EINVAL;

	if (args->flags & RADEON_GEM_USERPTR_READONLY) {
		/* readonly pages not tested on older hardware */
		if (rdev->family < CHIP_R600)
			return -EINVAL;

	} else if (!(args->flags & RADEON_GEM_USERPTR_ANONONLY) ||
		   !(args->flags & RADEON_GEM_USERPTR_REGISTER)) {

		/* if we want to write to it we must require anonymous
		   memory and install a MMU notifier */
		return -EACCES;
	}

	down_read(&rdev->exclusive_lock);

	/* create a gem object to contain this object in */
	r = radeon_gem_object_create(rdev, args->size, 0,
				     RADEON_GEM_DOMAIN_CPU, 0,
				     false, &gobj);
	if (r)
		goto handle_lockup;

	bo = gem_to_radeon_bo(gobj);
	r = radeon_ttm_tt_set_userptr(rdev, bo->tbo.ttm, args->addr, args->flags);
	if (r)
		goto release_object;

	if (args->flags & RADEON_GEM_USERPTR_REGISTER) {
		r = radeon_mn_register(bo, args->addr);
		if (r)
			goto release_object;
	}

	if (args->flags & RADEON_GEM_USERPTR_VALIDATE) {
		mmap_read_lock(current->mm);
		r = radeon_bo_reserve(bo, true);
		if (r) {
			mmap_read_unlock(current->mm);
			goto release_object;
		}

		radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_GTT);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
		radeon_bo_unreserve(bo);
		mmap_read_unlock(current->mm);
		if (r)
			goto release_object;
	}

	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(gobj);
	if (r)
		goto handle_lockup;

	args->handle = handle;
	up_read(&rdev->exclusive_lock);
	return 0;

release_object:
	drm_gem_object_put(gobj);

handle_lockup:
	up_read(&rdev->exclusive_lock);
	r = radeon_gem_handle_lockup(rdev, r);

	return r;
}
int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	/* transition the BO to a domain -
	 * just validate the BO into a certain domain */
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_set_domain *args = data;
	struct drm_gem_object *gobj;
	int r;

	/* for now if someone requests domain CPU -
	 * just make sure the buffer is finished with */
	down_read(&rdev->exclusive_lock);

	/* just do a BO wait for now */
	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		up_read(&rdev->exclusive_lock);
		return -ENOENT;
	}

	r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);

	drm_gem_object_put(gobj);
	up_read(&rdev->exclusive_lock);
	r = radeon_gem_handle_lockup(rdev, r);
	return r;
}
int radeon_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p)
{
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;

	gobj = drm_gem_object_lookup(filp, handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);
	if (radeon_ttm_tt_has_userptr(robj->rdev, robj->tbo.ttm)) {
		drm_gem_object_put(gobj);
		return -EPERM;
	}
	*offset_p = radeon_bo_mmap_offset(robj);
	drm_gem_object_put(gobj);
	return 0;
}
int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_mmap *args = data;

	return radeon_mode_dumb_mmap(filp, dev, args->handle, &args->addr_ptr);
}
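
/*
 * Report whether a BO still has fences pending and which memory domain it
 * currently lives in, without blocking.
 */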
int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_busy *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;
	uint32_t cur_placement = 0;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	r = dma_resv_test_signaled(robj->tbo.base.resv, DMA_RESV_USAGE_READ);
	if (r == 0)
		r = -EBUSY;
	else
		r = 0;

	cur_placement = READ_ONCE(robj->tbo.resource->mem_type);
	args->domain = radeon_mem_type_to_domain(cur_placement);
	drm_gem_object_put(gobj);
	return r;
}
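
/*
 * Block (up to 30 seconds) until all fences on the BO have signaled, then
 * flush the HDP cache for VRAM placements on ASICs that require it.
 */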
int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_wait_idle *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r = 0;
	uint32_t cur_placement = 0;
	long ret;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	ret = dma_resv_wait_timeout(robj->tbo.base.resv, DMA_RESV_USAGE_READ,
				    true, 30 * HZ);
	if (ret == 0)
		r = -EBUSY;
	else if (ret < 0)
		r = ret;

	/* Flush HDP cache via MMIO if necessary */
	cur_placement = READ_ONCE(robj->tbo.resource->mem_type);
	if (rdev->asic->mmio_hdp_flush &&
	    radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM)
		robj->rdev->asic->mmio_hdp_flush(rdev);
	drm_gem_object_put(gobj);
	r = radeon_gem_handle_lockup(rdev, r);
	return r;
}
int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_set_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r = 0;

	DRM_DEBUG("%d\n", args->handle);
	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	robj = gem_to_radeon_bo(gobj);
	r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
	drm_gem_object_put(gobj);
	return r;
}
int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_get_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *rbo;
	int r = 0;

	DRM_DEBUG("\n");
	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	rbo = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(rbo, false);
	if (unlikely(r != 0))
		goto out;
	radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch);
	radeon_bo_unreserve(rbo);
out:
	drm_gem_object_put(gobj);
	return r;
}
/**
 * radeon_gem_va_update_vm - update the bo_va in its VM
 *
 * @rdev: radeon_device pointer
 * @bo_va: bo_va to update
 *
 * Update the bo_va directly after setting its address. Errors are not
 * vital here, so they are not reported back to userspace.
 */
static void radeon_gem_va_update_vm(struct radeon_device *rdev,
				    struct radeon_bo_va *bo_va)
{
	struct ttm_validate_buffer tv, *entry;
	struct radeon_bo_list *vm_bos;
	struct ww_acquire_ctx ticket;
	struct list_head list;
	unsigned domain;
	int r;

	INIT_LIST_HEAD(&list);

	tv.bo = &bo_va->bo->tbo;
	tv.num_shared = 1;
	list_add(&tv.head, &list);

	vm_bos = radeon_vm_get_bos(rdev, bo_va->vm, &list);
	if (!vm_bos)
		return;

	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
	if (r)
		goto error_free;

	list_for_each_entry(entry, &list, head) {
		domain = radeon_mem_type_to_domain(entry->bo->resource->mem_type);
		/* if anything is swapped out don't swap it in here,
		   just abort and wait for the next CS */
		if (domain == RADEON_GEM_DOMAIN_CPU)
			goto error_unreserve;
	}

	mutex_lock(&bo_va->vm->mutex);
	r = radeon_vm_clear_freed(rdev, bo_va->vm);
	if (r)
		goto error_unlock;

	if (bo_va->it.start)
		r = radeon_vm_bo_update(rdev, bo_va, bo_va->bo->tbo.resource);

error_unlock:
	mutex_unlock(&bo_va->vm->mutex);

error_unreserve:
	ttm_eu_backoff_reservation(&ticket, &list);

error_free:
	kvfree(vm_bos);

	if (r && r != -ERESTARTSYS)
		DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
}
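
/*
 * Map or unmap a BO in the per-file virtual address space. The result is
 * reported back to userspace through args->operation.
 */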
int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct drm_radeon_gem_va *args = data;
	struct drm_gem_object *gobj;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_fpriv *fpriv = filp->driver_priv;
	struct radeon_bo *rbo;
	struct radeon_bo_va *bo_va;
	u32 invalid_flags;
	int r = 0;

	if (!rdev->vm_manager.enabled) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -ENOTTY;
	}

	/* !! DONT REMOVE !!
	 * We don't support vm_id yet; to be sure we don't have broken
	 * userspace, reject anyone trying to use a non-zero value, so that
	 * going forward we can use those fields without breaking existing
	 * userspace.
	 */
	if (args->vm_id) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	if (args->offset < RADEON_VA_RESERVED_SIZE) {
		dev_err(dev->dev,
			"offset 0x%lX is in reserved area 0x%X\n",
			(unsigned long)args->offset,
			RADEON_VA_RESERVED_SIZE);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	/* don't remove, we need to enforce userspace to set the snooped flag,
	 * otherwise we will end up with broken userspace and we won't be able
	 * to enable this feature without adding a new interface
	 */
	invalid_flags = RADEON_VM_PAGE_VALID | RADEON_VM_PAGE_SYSTEM;
	if ((args->flags & invalid_flags)) {
		dev_err(dev->dev, "invalid flags 0x%08X vs 0x%08X\n",
			args->flags, invalid_flags);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	switch (args->operation) {
	case RADEON_VA_MAP:
	case RADEON_VA_UNMAP:
		break;
	default:
		dev_err(dev->dev, "unsupported operation %d\n",
			args->operation);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -ENOENT;
	}
	rbo = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(rbo, false);
	if (r) {
		args->operation = RADEON_VA_RESULT_ERROR;
		drm_gem_object_put(gobj);
		return r;
	}
	bo_va = radeon_vm_bo_find(&fpriv->vm, rbo);
	if (!bo_va) {
		args->operation = RADEON_VA_RESULT_ERROR;
		radeon_bo_unreserve(rbo);
		drm_gem_object_put(gobj);
		return -ENOENT;
	}

	switch (args->operation) {
	case RADEON_VA_MAP:
		if (bo_va->it.start) {
			args->operation = RADEON_VA_RESULT_VA_EXIST;
			args->offset = bo_va->it.start * RADEON_GPU_PAGE_SIZE;
			radeon_bo_unreserve(rbo);
			goto out;
		}
		r = radeon_vm_bo_set_addr(rdev, bo_va, args->offset, args->flags);
		break;
	case RADEON_VA_UNMAP:
		r = radeon_vm_bo_set_addr(rdev, bo_va, 0, 0);
		break;
	default:
		break;
	}
	if (!r)
		radeon_gem_va_update_vm(rdev, bo_va);
	args->operation = RADEON_VA_RESULT_OK;
	if (r) {
		args->operation = RADEON_VA_RESULT_ERROR;
	}
out:
	drm_gem_object_put(gobj);
	return r;
}
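
/*
 * Get or set per-BO state not covered by other ioctls, currently only the
 * initial placement domain; userptr BOs are rejected.
 */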
int radeon_gem_op_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct drm_radeon_gem_op *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	r = -EPERM;
	if (radeon_ttm_tt_has_userptr(robj->rdev, robj->tbo.ttm))
		goto out;

	r = radeon_bo_reserve(robj, false);
	if (unlikely(r))
		goto out;

	switch (args->op) {
	case RADEON_GEM_OP_GET_INITIAL_DOMAIN:
		args->value = robj->initial_domain;
		break;
	case RADEON_GEM_OP_SET_INITIAL_DOMAIN:
		robj->initial_domain = args->value & (RADEON_GEM_DOMAIN_VRAM |
						      RADEON_GEM_DOMAIN_GTT |
						      RADEON_GEM_DOMAIN_CPU);
		break;
	default:
		r = -EINVAL;
	}

	radeon_bo_unreserve(robj);
out:
	drm_gem_object_put(gobj);
	return r;
}
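
/*
 * Round a scanout pitch up to the alignment the display hardware needs for
 * the given bytes-per-pixel. For example, with cpp == 4 on a
 * large-alignment ASIC the mask is 63, so the width is rounded up to a
 * multiple of 64 pixels before being converted to bytes.
 */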
int radeon_align_pitch(struct radeon_device *rdev, int width, int cpp, bool tiled)
{
	int aligned = width;
	int align_large = (ASIC_IS_AVIVO(rdev)) || tiled;
	int pitch_mask = 0;

	switch (cpp) {
	case 1:
		pitch_mask = align_large ? 255 : 127;
		break;
	case 2:
		pitch_mask = align_large ? 127 : 31;
		break;
	case 3:
	case 4:
		pitch_mask = align_large ? 63 : 15;
		break;
	}

	aligned += pitch_mask;
	aligned &= ~pitch_mask;
	return aligned * cpp;
}
int radeon_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	args->pitch = radeon_align_pitch(rdev, args->width,
					 DIV_ROUND_UP(args->bpp, 8), 0);
	args->size = (u64)args->pitch * args->height;
	args->size = ALIGN(args->size, PAGE_SIZE);

	r = radeon_gem_object_create(rdev, args->size, 0,
				     RADEON_GEM_DOMAIN_VRAM, 0,
				     false, &gobj);
	if (r)
		return -ENOMEM;

	r = drm_gem_handle_create(file_priv, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(gobj);
	if (r) {
		return r;
	}
	args->handle = handle;
	return 0;
}
#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_gem_info_show(struct seq_file *m, void *unused)
{
	struct radeon_device *rdev = m->private;
	struct radeon_bo *rbo;
	unsigned i = 0;

	mutex_lock(&rdev->gem.mutex);
	list_for_each_entry(rbo, &rdev->gem.objects, list) {
		unsigned domain;
		const char *placement;

		domain = radeon_mem_type_to_domain(rbo->tbo.resource->mem_type);
		switch (domain) {
		case RADEON_GEM_DOMAIN_VRAM:
			placement = "VRAM";
			break;
		case RADEON_GEM_DOMAIN_GTT:
			placement = " GTT";
			break;
		case RADEON_GEM_DOMAIN_CPU:
		default:
			placement = " CPU";
			break;
		}
		seq_printf(m, "bo[0x%08x] %8ldkB %8ldMB %s pid %8ld\n",
			   i, radeon_bo_size(rbo) >> 10, radeon_bo_size(rbo) >> 20,
			   placement, (unsigned long)rbo->pid);
		i++;
	}
	mutex_unlock(&rdev->gem.mutex);
	return 0;
}

DEFINE_SHOW_ATTRIBUTE(radeon_debugfs_gem_info);
#endif
void radeon_gem_debugfs_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	struct dentry *root = rdev->ddev->primary->debugfs_root;

	debugfs_create_file("radeon_gem_info", 0444, root, rdev,
			    &radeon_debugfs_gem_info_fops);
#endif
}