/*
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>

#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>
#include <drm/exynos_drm.h>

#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_iommu.h"

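/*
 * Allocate the backing storage for a GEM buffer with dma_alloc_attrs().
 * The object's EXYNOS_BO_* flags select the DMA attributes: buffers
 * without EXYNOS_BO_NONCONTIG are forced to be physically contiguous,
 * and the cache flags choose a write-combined or cachable mapping. The
 * struct page array is then rebuilt from the scatter-gather table of
 * the allocation so that the fault handler and PRIME export can use it.
 */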
static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem)
{
	struct drm_device *dev = exynos_gem->base.dev;
	enum dma_attr attr;
	unsigned int nr_pages;
	struct sg_table sgt;
	int ret = -ENOMEM;

	if (exynos_gem->dma_addr) {
		DRM_DEBUG_KMS("already allocated.\n");
		return 0;
	}

	init_dma_attrs(&exynos_gem->dma_attrs);

	/*
	 * if EXYNOS_BO_CONTIG, a fully physically contiguous memory
	 * region will be allocated; otherwise the memory only needs to
	 * be as physically contiguous as possible.
	 */
	if (!(exynos_gem->flags & EXYNOS_BO_NONCONTIG))
		dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &exynos_gem->dma_attrs);

	/*
	 * if EXYNOS_BO_WC or EXYNOS_BO_NONCACHABLE, use a writecombine
	 * mapping, else a cachable mapping.
	 */
	if (exynos_gem->flags & EXYNOS_BO_WC ||
	    !(exynos_gem->flags & EXYNOS_BO_CACHABLE))
		attr = DMA_ATTR_WRITE_COMBINE;
	else
		attr = DMA_ATTR_NON_CONSISTENT;

	dma_set_attr(attr, &exynos_gem->dma_attrs);
	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &exynos_gem->dma_attrs);

	nr_pages = exynos_gem->size >> PAGE_SHIFT;
	exynos_gem->pages = drm_calloc_large(nr_pages, sizeof(struct page *));
	if (!exynos_gem->pages) {
		DRM_ERROR("failed to allocate pages.\n");
		return -ENOMEM;
	}

	exynos_gem->cookie = dma_alloc_attrs(dev->dev, exynos_gem->size,
					     &exynos_gem->dma_addr, GFP_KERNEL,
					     &exynos_gem->dma_attrs);
	if (!exynos_gem->cookie) {
		DRM_ERROR("failed to allocate buffer.\n");
		goto err_free;
	}

	ret = dma_get_sgtable_attrs(dev->dev, &sgt, exynos_gem->cookie,
				    exynos_gem->dma_addr, exynos_gem->size,
				    &exynos_gem->dma_attrs);
	if (ret < 0) {
		DRM_ERROR("failed to get sgtable.\n");
		goto err_dma_free;
	}

	if (drm_prime_sg_to_page_addr_arrays(&sgt, exynos_gem->pages, NULL,
					     nr_pages)) {
		DRM_ERROR("invalid sgtable.\n");
		ret = -EINVAL;
		goto err_sgt_free;
	}

	sg_free_table(&sgt);

	DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
			(unsigned long)exynos_gem->dma_addr, exynos_gem->size);
	return 0;

err_sgt_free:
	sg_free_table(&sgt);
err_dma_free:
	dma_free_attrs(dev->dev, exynos_gem->size, exynos_gem->cookie,
		       exynos_gem->dma_addr, &exynos_gem->dma_attrs);
err_free:
	drm_free_large(exynos_gem->pages);
	return ret;
}

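/*
 * Release the DMA buffer and the page array created by
 * exynos_drm_alloc_buf(). Buffers that were never allocated
 * (dma_addr == 0) are skipped.
 */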
static void exynos_drm_free_buf(struct exynos_drm_gem *exynos_gem)
{
	struct drm_device *dev = exynos_gem->base.dev;

	if (!exynos_gem->dma_addr) {
		DRM_DEBUG_KMS("dma_addr is invalid.\n");
		return;
	}

	DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
			(unsigned long)exynos_gem->dma_addr, exynos_gem->size);

	dma_free_attrs(dev->dev, exynos_gem->size, exynos_gem->cookie,
			(dma_addr_t)exynos_gem->dma_addr,
			&exynos_gem->dma_attrs);

	drm_free_large(exynos_gem->pages);
}

static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
					struct drm_file *file_priv,
					unsigned int *handle)
{
	int ret;

	/*
	 * allocate an id from the idr table where the obj is registered;
	 * the handle holds that id so userspace can refer to the object.
	 */
	ret = drm_gem_handle_create(file_priv, obj, handle);
	if (ret)
		return ret;

	DRM_DEBUG_KMS("gem handle = 0x%x\n", *handle);

	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_unreference_unlocked(obj);

	return 0;
}

void exynos_drm_gem_destroy(struct exynos_drm_gem *exynos_gem)
{
	struct drm_gem_object *obj = &exynos_gem->base;

	DRM_DEBUG_KMS("handle count = %d\n", obj->handle_count);

	/*
	 * do not release the memory region from the exporter.
	 *
	 * the region will be released by the exporter
	 * once the dmabuf's refcount becomes 0.
	 */
	if (obj->import_attach)
		drm_prime_gem_destroy(obj, exynos_gem->sgt);
	else
		exynos_drm_free_buf(exynos_gem);

	/* release file pointer to gem object. */
	drm_gem_object_release(obj);

	kfree(exynos_gem);
}

unsigned long exynos_drm_gem_get_size(struct drm_device *dev,
						unsigned int gem_handle,
						struct drm_file *file_priv)
{
	struct exynos_drm_gem *exynos_gem;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(dev, file_priv, gem_handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return 0;
	}

	exynos_gem = to_exynos_gem(obj);

	drm_gem_object_unreference_unlocked(obj);

	return exynos_gem->size;
}

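/*
 * Allocate and initialize a new exynos GEM object of @size bytes: the
 * base GEM object is set up and an mmap offset is created so that
 * userspace can map the buffer later. On failure the object is torn
 * down again and an ERR_PTR() is returned.
 */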
static struct exynos_drm_gem *exynos_drm_gem_init(struct drm_device *dev,
						  unsigned long size)
{
	struct exynos_drm_gem *exynos_gem;
	struct drm_gem_object *obj;
	int ret;

	exynos_gem = kzalloc(sizeof(*exynos_gem), GFP_KERNEL);
	if (!exynos_gem)
		return ERR_PTR(-ENOMEM);

	exynos_gem->size = size;
	obj = &exynos_gem->base;

	ret = drm_gem_object_init(dev, obj, size);
	if (ret < 0) {
		DRM_ERROR("failed to initialize gem object\n");
		kfree(exynos_gem);
		return ERR_PTR(ret);
	}

	ret = drm_gem_create_mmap_offset(obj);
	if (ret < 0) {
		drm_gem_object_release(obj);
		kfree(exynos_gem);
		return ERR_PTR(ret);
	}

	DRM_DEBUG_KMS("created file object = 0x%x\n", (unsigned int)obj->filp);

	return exynos_gem;
}

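/*
 * Create a GEM buffer: validate the EXYNOS_BO_* flags and the size,
 * round the size up to page granularity and allocate the backing
 * memory according to the requested memory type and cache attributes.
 */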
struct exynos_drm_gem *exynos_drm_gem_create(struct drm_device *dev,
					     unsigned int flags,
					     unsigned long size)
{
	struct exynos_drm_gem *exynos_gem;
	int ret;

	if (flags & ~(EXYNOS_BO_MASK)) {
		DRM_ERROR("invalid flags.\n");
		return ERR_PTR(-EINVAL);
	}

	if (!size) {
		DRM_ERROR("invalid size.\n");
		return ERR_PTR(-EINVAL);
	}

	size = roundup(size, PAGE_SIZE);

	exynos_gem = exynos_drm_gem_init(dev, size);
	if (IS_ERR(exynos_gem))
		return exynos_gem;

	/* set memory type and cache attribute from user side. */
	exynos_gem->flags = flags;

	ret = exynos_drm_alloc_buf(exynos_gem);
	if (ret < 0) {
		drm_gem_object_release(&exynos_gem->base);
		kfree(exynos_gem);
		return ERR_PTR(ret);
	}

	return exynos_gem;
}

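/*
 * DRM_IOCTL_EXYNOS_GEM_CREATE handler: create a buffer of the requested
 * size and flags and return a handle for it. A userspace caller drives
 * it roughly along these lines (illustrative sketch only, not part of
 * this file; drm_fd and length are placeholders):
 *
 *	struct drm_exynos_gem_create arg = {
 *		.size = length,
 *		.flags = EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC,
 *	};
 *	ioctl(drm_fd, DRM_IOCTL_EXYNOS_GEM_CREATE, &arg);
 *	// arg.handle now names the new buffer object
 */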
int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct drm_exynos_gem_create *args = data;
	struct exynos_drm_gem *exynos_gem;
	int ret;

	exynos_gem = exynos_drm_gem_create(dev, args->flags, args->size);
	if (IS_ERR(exynos_gem))
		return PTR_ERR(exynos_gem);

	ret = exynos_drm_gem_handle_create(&exynos_gem->base, file_priv,
					   &args->handle);
	if (ret) {
		exynos_drm_gem_destroy(exynos_gem);
		return ret;
	}

	return 0;
}

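/*
 * exynos_drm_gem_get_dma_addr() looks up the object for @gem_handle and
 * returns a pointer to its device (DMA) address; the reference taken by
 * the lookup is intentionally kept so the buffer stays alive while the
 * address is in use. exynos_drm_gem_put_dma_addr() drops that reference
 * again, together with the one taken by its own lookup.
 */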
dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
					unsigned int gem_handle,
					struct drm_file *filp)
{
	struct exynos_drm_gem *exynos_gem;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(dev, filp, gem_handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return ERR_PTR(-EINVAL);
	}

	exynos_gem = to_exynos_gem(obj);

	return &exynos_gem->dma_addr;
}

void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
					unsigned int gem_handle,
					struct drm_file *filp)
{
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(dev, filp, gem_handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return;
	}

	drm_gem_object_unreference_unlocked(obj);

	/*
	 * decrease obj->refcount one more time because we have already
	 * increased it in exynos_drm_gem_get_dma_addr().
	 */
	drm_gem_object_unreference_unlocked(obj);
}

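/*
 * Map the whole buffer into the given VMA with dma_mmap_attrs(), using
 * the same DMA attributes the buffer was allocated with. A VMA that is
 * larger than the buffer is rejected.
 */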
static int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem *exynos_gem,
				      struct vm_area_struct *vma)
{
	struct drm_device *drm_dev = exynos_gem->base.dev;
	unsigned long vm_size;
	int ret;

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_pgoff = 0;

	vm_size = vma->vm_end - vma->vm_start;

	/* check if user-requested size is valid. */
	if (vm_size > exynos_gem->size)
		return -EINVAL;

	ret = dma_mmap_attrs(drm_dev->dev, vma, exynos_gem->pages,
			     exynos_gem->dma_addr, exynos_gem->size,
			     &exynos_gem->dma_attrs);
	if (ret < 0) {
		DRM_ERROR("failed to mmap.\n");
		return ret;
	}

	return 0;
}

int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
				      struct drm_file *file_priv)
{
	struct exynos_drm_gem *exynos_gem;
	struct drm_exynos_gem_info *args = data;
	struct drm_gem_object *obj;

	mutex_lock(&dev->struct_mutex);

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	exynos_gem = to_exynos_gem(obj);

	args->flags = exynos_gem->flags;
	args->size = exynos_gem->size;

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

int exynos_gem_map_sgt_with_dma(struct drm_device *drm_dev,
				struct sg_table *sgt,
				enum dma_data_direction dir)
{
	int nents;

	mutex_lock(&drm_dev->struct_mutex);

	nents = dma_map_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir);
	if (!nents) {
		DRM_ERROR("failed to map sgl with dma.\n");
		mutex_unlock(&drm_dev->struct_mutex);
		return -EIO;
	}

	mutex_unlock(&drm_dev->struct_mutex);
	return 0;
}

void exynos_gem_unmap_sgt_from_dma(struct drm_device *drm_dev,
				struct sg_table *sgt,
				enum dma_data_direction dir)
{
	dma_unmap_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir);
}

void exynos_drm_gem_free_object(struct drm_gem_object *obj)
{
	exynos_drm_gem_destroy(to_exynos_gem(obj));
}

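/*
 * Dumb-buffer allocation for the generic KMS API: the pitch is derived
 * from width and bpp, and the buffer is allocated write-combined,
 * non-contiguous when an IOMMU is available and contiguous otherwise.
 * A minimal userspace sketch (illustrative only, drm_fd is a placeholder):
 *
 *	struct drm_mode_create_dumb creq = {
 *		.width = 1920, .height = 1080, .bpp = 32,
 *	};
 *	ioctl(drm_fd, DRM_IOCTL_MODE_CREATE_DUMB, &creq);
 *	// creq.handle, creq.pitch and creq.size describe the new buffer
 */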
int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
			       struct drm_device *dev,
			       struct drm_mode_create_dumb *args)
{
	struct exynos_drm_gem *exynos_gem;
	unsigned int flags;
	int ret;

	/*
	 * allocate memory to be used for framebuffer.
	 * - this callback would be called by user application
	 *	with DRM_IOCTL_MODE_CREATE_DUMB command.
	 */

	args->pitch = args->width * ((args->bpp + 7) / 8);
	args->size = args->pitch * args->height;

	if (is_drm_iommu_supported(dev))
		flags = EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC;
	else
		flags = EXYNOS_BO_CONTIG | EXYNOS_BO_WC;

	exynos_gem = exynos_drm_gem_create(dev, flags, args->size);
	if (IS_ERR(exynos_gem)) {
		dev_warn(dev->dev, "FB allocation failed.\n");
		return PTR_ERR(exynos_gem);
	}

	ret = exynos_drm_gem_handle_create(&exynos_gem->base, file_priv,
					   &args->handle);
	if (ret) {
		exynos_drm_gem_destroy(exynos_gem);
		return ret;
	}

	return 0;
}

int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
				   struct drm_device *dev, uint32_t handle,
				   uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	mutex_lock(&dev->struct_mutex);

	/*
	 * get offset of memory allocated for drm framebuffer.
	 * - this callback would be called by user application
	 *	with DRM_IOCTL_MODE_MAP_DUMB command.
	 */

	obj = drm_gem_object_lookup(dev, file_priv, handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		ret = -EINVAL;
		goto unlock;
	}

	*offset = drm_vma_node_offset_addr(&obj->vma_node);
	DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset);

	drm_gem_object_unreference(obj);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

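/*
 * Page-fault handler for mmapped GEM objects: translate the faulting
 * address into a page offset within the buffer and insert the matching
 * entry of the pages[] array into the VMA. The error code is then
 * mapped onto the VM_FAULT_* codes expected by the mm core.
 */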
int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj);
	unsigned long pfn;
	pgoff_t page_offset;
	int ret;

	page_offset = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;
	if (page_offset >= (exynos_gem->size >> PAGE_SHIFT)) {
		DRM_ERROR("invalid page offset\n");
		ret = -EINVAL;
		goto out;
	}

	pfn = page_to_pfn(exynos_gem->pages[page_offset]);
	ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address,
			__pfn_to_pfn_t(pfn, PFN_DEV));

out:
	switch (ret) {
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

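/*
 * fops->mmap handler: drm_gem_mmap() validates the fake mmap offset and
 * sets up the VMA, then the page protection is chosen from the buffer's
 * cache flags (cachable, write-combined or non-cached) and the backing
 * memory is mapped by exynos_drm_gem_mmap_buffer(). On failure the VMA
 * is closed again so the reference taken by drm_gem_mmap() is dropped.
 */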
int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct exynos_drm_gem *exynos_gem;
	struct drm_gem_object *obj;
	int ret;

	/* set vm_area_struct. */
	ret = drm_gem_mmap(filp, vma);
	if (ret < 0) {
		DRM_ERROR("failed to mmap.\n");
		return ret;
	}

	obj = vma->vm_private_data;
	exynos_gem = to_exynos_gem(obj);

	DRM_DEBUG_KMS("flags = 0x%x\n", exynos_gem->flags);

	/* non-cachable as default. */
	if (exynos_gem->flags & EXYNOS_BO_CACHABLE)
		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	else if (exynos_gem->flags & EXYNOS_BO_WC)
		vma->vm_page_prot =
			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	else
		vma->vm_page_prot =
			pgprot_noncached(vm_get_page_prot(vma->vm_flags));

	ret = exynos_drm_gem_mmap_buffer(exynos_gem, vma);
	if (ret)
		goto err_close_vm;

	return ret;

err_close_vm:
	drm_gem_vm_close(vma);
	return ret;
}

/* low-level interface prime helpers */
struct sg_table *exynos_drm_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
	struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj);
	int npages;

	npages = exynos_gem->size >> PAGE_SHIFT;

	return drm_prime_pages_to_sg(exynos_gem->pages, npages);
}

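/*
 * Import a dma-buf: wrap the sg table handed over by the exporter in an
 * exynos GEM object, record the device address of its first entry and
 * rebuild the pages[] array from the table. A table with a single entry
 * is treated as physically contiguous memory.
 */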
struct drm_gem_object *
exynos_drm_gem_prime_import_sg_table(struct drm_device *dev,
				     struct dma_buf_attachment *attach,
				     struct sg_table *sgt)
{
	struct exynos_drm_gem *exynos_gem;
	int npages;
	int ret;

	exynos_gem = exynos_drm_gem_init(dev, attach->dmabuf->size);
	if (IS_ERR(exynos_gem)) {
		ret = PTR_ERR(exynos_gem);
		return ERR_PTR(ret);
	}

	exynos_gem->dma_addr = sg_dma_address(sgt->sgl);

	npages = exynos_gem->size >> PAGE_SHIFT;
	exynos_gem->pages = drm_malloc_ab(npages, sizeof(struct page *));
	if (!exynos_gem->pages) {
		ret = -ENOMEM;
		goto err;
	}

	ret = drm_prime_sg_to_page_addr_arrays(sgt, exynos_gem->pages, NULL,
					       npages);
	if (ret < 0)
		goto err_free_large;

	exynos_gem->sgt = sgt;

	if (sgt->nents == 1) {
		/* always physically contiguous memory if sgt->nents is 1. */
		exynos_gem->flags |= EXYNOS_BO_CONTIG;
	} else {
		/*
		 * this case could be CONTIG or NONCONTIG type but for now
		 * we set NONCONTIG.
		 * TODO. we have to find a way that the exporter can notify
		 * the importer of the type of its own buffer.
		 */
		exynos_gem->flags |= EXYNOS_BO_NONCONTIG;
	}

	return &exynos_gem->base;

err_free_large:
	drm_free_large(exynos_gem->pages);
err:
	drm_gem_object_release(&exynos_gem->base);
	kfree(exynos_gem);
	return ERR_PTR(ret);
}

void *exynos_drm_gem_prime_vmap(struct drm_gem_object *obj)
{
	return NULL;
}

void exynos_drm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	/* Nothing to do */
}