/* exynos_drm_gem.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>

#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>
#include <drm/exynos_drm.h>

#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_iommu.h"

static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem)
{
	struct drm_device *dev = exynos_gem->base.dev;
	unsigned long attr;
	unsigned int nr_pages;
	struct sg_table sgt;
	int ret = -ENOMEM;

	if (exynos_gem->dma_addr) {
		DRM_DEBUG_KMS("already allocated.\n");
		return 0;
	}

	exynos_gem->dma_attrs = 0;

	/*
	 * if EXYNOS_BO_CONTIG, a fully physically contiguous memory
	 * region will be allocated, else memory as physically contiguous
	 * as possible.
	 */
	if (!(exynos_gem->flags & EXYNOS_BO_NONCONTIG))
		exynos_gem->dma_attrs |= DMA_ATTR_FORCE_CONTIGUOUS;

	/*
	 * if EXYNOS_BO_WC or EXYNOS_BO_NONCACHABLE, writecombine mapping,
	 * else cachable mapping.
	 */
	if (exynos_gem->flags & EXYNOS_BO_WC ||
			!(exynos_gem->flags & EXYNOS_BO_CACHABLE))
		attr = DMA_ATTR_WRITE_COMBINE;
	else
		attr = DMA_ATTR_NON_CONSISTENT;
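
	/*
	 * DMA_ATTR_NO_KERNEL_MAPPING: the buffer is not mapped into the
	 * kernel address space; only the opaque cookie returned by
	 * dma_alloc_attrs() is kept and later handed to dma_mmap_attrs()
	 * and dma_get_sgtable_attrs().
	 */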
	exynos_gem->dma_attrs |= attr;
	exynos_gem->dma_attrs |= DMA_ATTR_NO_KERNEL_MAPPING;

	nr_pages = exynos_gem->size >> PAGE_SHIFT;

	exynos_gem->pages = drm_calloc_large(nr_pages, sizeof(struct page *));
	if (!exynos_gem->pages) {
		DRM_ERROR("failed to allocate pages.\n");
		return -ENOMEM;
	}

	exynos_gem->cookie = dma_alloc_attrs(to_dma_dev(dev), exynos_gem->size,
					     &exynos_gem->dma_addr, GFP_KERNEL,
					     exynos_gem->dma_attrs);
	if (!exynos_gem->cookie) {
		DRM_ERROR("failed to allocate buffer.\n");
		goto err_free;
	}

	ret = dma_get_sgtable_attrs(to_dma_dev(dev), &sgt, exynos_gem->cookie,
				    exynos_gem->dma_addr, exynos_gem->size,
				    exynos_gem->dma_attrs);
	if (ret < 0) {
		DRM_ERROR("failed to get sgtable.\n");
		goto err_dma_free;
	}

	if (drm_prime_sg_to_page_addr_arrays(&sgt, exynos_gem->pages, NULL,
					     nr_pages)) {
		DRM_ERROR("invalid sgtable.\n");
		ret = -EINVAL;
		goto err_sgt_free;
	}

	sg_free_table(&sgt);

	DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
			(unsigned long)exynos_gem->dma_addr, exynos_gem->size);

	return 0;

err_sgt_free:
	sg_free_table(&sgt);
err_dma_free:
	dma_free_attrs(to_dma_dev(dev), exynos_gem->size, exynos_gem->cookie,
		       exynos_gem->dma_addr, exynos_gem->dma_attrs);
err_free:
	drm_free_large(exynos_gem->pages);

	return ret;
}

static void exynos_drm_free_buf(struct exynos_drm_gem *exynos_gem)
{
	struct drm_device *dev = exynos_gem->base.dev;

	if (!exynos_gem->dma_addr) {
		DRM_DEBUG_KMS("dma_addr is invalid.\n");
		return;
	}

	DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
			(unsigned long)exynos_gem->dma_addr, exynos_gem->size);

	dma_free_attrs(to_dma_dev(dev), exynos_gem->size, exynos_gem->cookie,
			(dma_addr_t)exynos_gem->dma_addr,
			exynos_gem->dma_attrs);

	drm_free_large(exynos_gem->pages);
}

static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
					struct drm_file *file_priv,
					unsigned int *handle)
{
	int ret;

	/*
	 * allocate an id in the idr table where the obj is registered;
	 * the handle is the id that userspace sees.
	 */
	ret = drm_gem_handle_create(file_priv, obj, handle);
	if (ret)
		return ret;

	DRM_DEBUG_KMS("gem handle = 0x%x\n", *handle);

	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_unreference_unlocked(obj);

	return 0;
}

void exynos_drm_gem_destroy(struct exynos_drm_gem *exynos_gem)
{
	struct drm_gem_object *obj = &exynos_gem->base;

	DRM_DEBUG_KMS("handle count = %d\n", obj->handle_count);

	/*
	 * do not release the memory region of an imported buffer here.
	 *
	 * the region will be released by the exporter
	 * once the dmabuf's refcount becomes 0.
	 */
	if (obj->import_attach)
		drm_prime_gem_destroy(obj, exynos_gem->sgt);
	else
		exynos_drm_free_buf(exynos_gem);

	/* release file pointer to gem object. */
	drm_gem_object_release(obj);

	kfree(exynos_gem);
}

unsigned long exynos_drm_gem_get_size(struct drm_device *dev,
						unsigned int gem_handle,
						struct drm_file *file_priv)
{
	struct exynos_drm_gem *exynos_gem;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(file_priv, gem_handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return 0;
	}

	exynos_gem = to_exynos_gem(obj);

	drm_gem_object_unreference_unlocked(obj);

	return exynos_gem->size;
}

static struct exynos_drm_gem *exynos_drm_gem_init(struct drm_device *dev,
						  unsigned long size)
{
	struct exynos_drm_gem *exynos_gem;
	struct drm_gem_object *obj;
	int ret;

	exynos_gem = kzalloc(sizeof(*exynos_gem), GFP_KERNEL);
	if (!exynos_gem)
		return ERR_PTR(-ENOMEM);

	exynos_gem->size = size;
	obj = &exynos_gem->base;

	ret = drm_gem_object_init(dev, obj, size);
	if (ret < 0) {
		DRM_ERROR("failed to initialize gem object\n");
		kfree(exynos_gem);
		return ERR_PTR(ret);
	}

	ret = drm_gem_create_mmap_offset(obj);
	if (ret < 0) {
		drm_gem_object_release(obj);
		kfree(exynos_gem);
		return ERR_PTR(ret);
	}

	DRM_DEBUG_KMS("created file object = %p\n", obj->filp);

	return exynos_gem;
}

struct exynos_drm_gem *exynos_drm_gem_create(struct drm_device *dev,
					     unsigned int flags,
					     unsigned long size)
{
	struct exynos_drm_gem *exynos_gem;
	int ret;

	if (flags & ~(EXYNOS_BO_MASK)) {
		DRM_ERROR("invalid flags.\n");
		return ERR_PTR(-EINVAL);
	}

	if (!size) {
		DRM_ERROR("invalid size.\n");
		return ERR_PTR(-EINVAL);
	}

	size = roundup(size, PAGE_SIZE);

	exynos_gem = exynos_drm_gem_init(dev, size);
	if (IS_ERR(exynos_gem))
		return exynos_gem;

	/* set memory type and cache attribute from user side. */
	exynos_gem->flags = flags;

	ret = exynos_drm_alloc_buf(exynos_gem);
	if (ret < 0) {
		drm_gem_object_release(&exynos_gem->base);
		kfree(exynos_gem);
		return ERR_PTR(ret);
	}

	return exynos_gem;
}

int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct drm_exynos_gem_create *args = data;
	struct exynos_drm_gem *exynos_gem;
	int ret;

	exynos_gem = exynos_drm_gem_create(dev, args->flags, args->size);
	if (IS_ERR(exynos_gem))
		return PTR_ERR(exynos_gem);

	ret = exynos_drm_gem_handle_create(&exynos_gem->base, file_priv,
					   &args->handle);
	if (ret) {
		exynos_drm_gem_destroy(exynos_gem);
		return ret;
	}

	return 0;
}

int exynos_drm_gem_map_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct drm_exynos_gem_map *args = data;

	return exynos_drm_gem_dumb_map_offset(file_priv, dev, args->handle,
					      &args->offset);
}

dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
					unsigned int gem_handle,
					struct drm_file *filp)
{
	struct exynos_drm_gem *exynos_gem;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(filp, gem_handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return ERR_PTR(-EINVAL);
	}

	exynos_gem = to_exynos_gem(obj);
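
	/*
	 * keep the reference taken by drm_gem_object_lookup(); it pins the
	 * buffer while the caller uses the DMA address and is dropped in
	 * exynos_drm_gem_put_dma_addr().
	 */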
	return &exynos_gem->dma_addr;
}

void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
					unsigned int gem_handle,
					struct drm_file *filp)
{
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(filp, gem_handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return;
	}

	drm_gem_object_unreference_unlocked(obj);

	/*
	 * decrease obj->refcount one more time because we have already
	 * increased it at exynos_drm_gem_get_dma_addr().
	 */
	drm_gem_object_unreference_unlocked(obj);
}

static int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem *exynos_gem,
				      struct vm_area_struct *vma)
{
	struct drm_device *drm_dev = exynos_gem->base.dev;
	unsigned long vm_size;
	int ret;

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_pgoff = 0;

	vm_size = vma->vm_end - vma->vm_start;

	/* check if user-requested size is valid. */
	if (vm_size > exynos_gem->size)
		return -EINVAL;
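
	/*
	 * map the whole buffer into the vma using the cookie and the
	 * attributes recorded at allocation time.
	 */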
	ret = dma_mmap_attrs(to_dma_dev(drm_dev), vma, exynos_gem->cookie,
			     exynos_gem->dma_addr, exynos_gem->size,
			     exynos_gem->dma_attrs);
	if (ret < 0) {
		DRM_ERROR("failed to mmap.\n");
		return ret;
	}

	return 0;
}

int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
				      struct drm_file *file_priv)
{
	struct exynos_drm_gem *exynos_gem;
	struct drm_exynos_gem_info *args = data;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return -EINVAL;
	}

	exynos_gem = to_exynos_gem(obj);

	args->flags = exynos_gem->flags;
	args->size = exynos_gem->size;

	drm_gem_object_unreference_unlocked(obj);

	return 0;
}

void exynos_drm_gem_free_object(struct drm_gem_object *obj)
{
	exynos_drm_gem_destroy(to_exynos_gem(obj));
}

int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
			       struct drm_device *dev,
			       struct drm_mode_create_dumb *args)
{
	struct exynos_drm_gem *exynos_gem;
	unsigned int flags;
	int ret;

	/*
	 * allocate memory to be used for framebuffer.
	 * - this callback is called by userspace with the
	 *	DRM_IOCTL_MODE_CREATE_DUMB command.
	 */
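
	/*
	 * pitch is width times bytes per pixel, with bpp rounded up to a
	 * whole number of bytes; total size is pitch times height.
	 */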
	args->pitch = args->width * ((args->bpp + 7) / 8);
	args->size = args->pitch * args->height;

	if (is_drm_iommu_supported(dev))
		flags = EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC;
	else
		flags = EXYNOS_BO_CONTIG | EXYNOS_BO_WC;

	exynos_gem = exynos_drm_gem_create(dev, flags, args->size);
	if (IS_ERR(exynos_gem)) {
		dev_warn(dev->dev, "FB allocation failed.\n");
		return PTR_ERR(exynos_gem);
	}

	ret = exynos_drm_gem_handle_create(&exynos_gem->base, file_priv,
					   &args->handle);
	if (ret) {
		exynos_drm_gem_destroy(exynos_gem);
		return ret;
	}

	return 0;
}

int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
				   struct drm_device *dev, uint32_t handle,
				   uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/*
	 * get offset of memory allocated for drm framebuffer.
	 * - this callback is called by userspace with the
	 *	DRM_IOCTL_MODE_MAP_DUMB command.
	 */

	obj = drm_gem_object_lookup(file_priv, handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return -EINVAL;
	}

	*offset = drm_vma_node_offset_addr(&obj->vma_node);
	DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset);

	drm_gem_object_unreference_unlocked(obj);

	return ret;
}

int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj);
	unsigned long pfn;
	pgoff_t page_offset;
	int ret;
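
	/*
	 * translate the faulting user address into an index into the page
	 * array kept in exynos_gem->pages, then insert that page's pfn
	 * into the vma.
	 */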
	page_offset = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;

	if (page_offset >= (exynos_gem->size >> PAGE_SHIFT)) {
		DRM_ERROR("invalid page offset\n");
		ret = -EINVAL;
		goto out;
	}

	pfn = page_to_pfn(exynos_gem->pages[page_offset]);
	ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address,
			__pfn_to_pfn_t(pfn, PFN_DEV));

out:
	switch (ret) {
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

static int exynos_drm_gem_mmap_obj(struct drm_gem_object *obj,
				   struct vm_area_struct *vma)
{
	struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj);
	int ret;

	DRM_DEBUG_KMS("flags = 0x%x\n", exynos_gem->flags);

	/* non-cachable as default. */
	if (exynos_gem->flags & EXYNOS_BO_CACHABLE)
		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	else if (exynos_gem->flags & EXYNOS_BO_WC)
		vma->vm_page_prot =
			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	else
		vma->vm_page_prot =
			pgprot_noncached(vm_get_page_prot(vma->vm_flags));

	ret = exynos_drm_gem_mmap_buffer(exynos_gem, vma);
	if (ret)
		goto err_close_vm;

	return ret;

err_close_vm:
	drm_gem_vm_close(vma);

	return ret;
}

int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_gem_object *obj;
	int ret;

	/* set vm_area_struct. */
	ret = drm_gem_mmap(filp, vma);
	if (ret < 0) {
		DRM_ERROR("failed to mmap.\n");
		return ret;
	}

	obj = vma->vm_private_data;
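
	/*
	 * for a buffer imported through PRIME, the exporter owns the backing
	 * pages, so delegate the mapping to its dma-buf mmap implementation.
	 */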
	if (obj->import_attach)
		return dma_buf_mmap(obj->dma_buf, vma, 0);

	return exynos_drm_gem_mmap_obj(obj, vma);
}

/* low-level interface prime helpers */
struct sg_table *exynos_drm_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
	struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj);
	int npages;

	npages = exynos_gem->size >> PAGE_SHIFT;

	return drm_prime_pages_to_sg(exynos_gem->pages, npages);
}

struct drm_gem_object *
exynos_drm_gem_prime_import_sg_table(struct drm_device *dev,
				     struct dma_buf_attachment *attach,
				     struct sg_table *sgt)
{
	struct exynos_drm_gem *exynos_gem;
	int npages;
	int ret;

	exynos_gem = exynos_drm_gem_init(dev, attach->dmabuf->size);
	if (IS_ERR(exynos_gem)) {
		ret = PTR_ERR(exynos_gem);
		return ERR_PTR(ret);
	}
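
	/* use the DMA address of the first sg entry as the buffer's base. */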
	exynos_gem->dma_addr = sg_dma_address(sgt->sgl);

	npages = exynos_gem->size >> PAGE_SHIFT;
	exynos_gem->pages = drm_malloc_ab(npages, sizeof(struct page *));
	if (!exynos_gem->pages) {
		ret = -ENOMEM;
		goto err;
	}

	ret = drm_prime_sg_to_page_addr_arrays(sgt, exynos_gem->pages, NULL,
					       npages);
	if (ret < 0)
		goto err_free_large;

	exynos_gem->sgt = sgt;

	if (sgt->nents == 1) {
		/* always physically contiguous memory if sgt->nents is 1. */
		exynos_gem->flags |= EXYNOS_BO_CONTIG;
	} else {
		/*
		 * this case could be CONTIG or NONCONTIG type but for now
		 * we assume NONCONTIG.
		 * TODO: find a way for the exporter to notify the importer
		 * of the type of its own buffer.
		 */
		exynos_gem->flags |= EXYNOS_BO_NONCONTIG;
	}

	return &exynos_gem->base;

err_free_large:
	drm_free_large(exynos_gem->pages);
err:
	drm_gem_object_release(&exynos_gem->base);
	kfree(exynos_gem);
	return ERR_PTR(ret);
}
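
/*
 * buffers are allocated with DMA_ATTR_NO_KERNEL_MAPPING, so there is no
 * kernel virtual address that vmap could return.
 */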
void *exynos_drm_gem_prime_vmap(struct drm_gem_object *obj)
{
	return NULL;
}

void exynos_drm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	/* Nothing to do */
}

int exynos_drm_gem_prime_mmap(struct drm_gem_object *obj,
			      struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap_obj(obj, obj->size, vma);
	if (ret < 0)
		return ret;

	return exynos_drm_gem_mmap_obj(obj, vma);
}