// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Russell King
 */

#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/mman.h>
#include <linux/shmem_fs.h>

#include <drm/armada_drm.h>
#include <drm/drm_prime.h>

#include "armada_drm.h"
#include "armada_gem.h"
#include "armada_ioctlP.h"

MODULE_IMPORT_NS(DMA_BUF);

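/*
 * GEM objects handled here are backed by physically contiguous memory
 * (either system pages or a chunk of the linear pool), so the fault
 * handler below can compute the target PFN directly from the object's
 * base physical address plus the page offset of the fault within the VMA.
 */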
static vm_fault_t armada_gem_vm_fault(struct vm_fault *vmf)
{
	struct drm_gem_object *gobj = vmf->vma->vm_private_data;
	struct armada_gem_object *obj = drm_to_armada_gem(gobj);
	unsigned long pfn = obj->phys_addr >> PAGE_SHIFT;

	pfn += (vmf->address - vmf->vma->vm_start) >> PAGE_SHIFT;
	return vmf_insert_pfn(vmf->vma, vmf->address, pfn);
}

static const struct vm_operations_struct armada_gem_vm_ops = {
	.fault	= armada_gem_vm_fault,
	.open	= drm_gem_vm_open,
	.close	= drm_gem_vm_close,
};

static size_t roundup_gem_size(size_t size)
{
	return roundup(size, PAGE_SIZE);
}

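/*
 * Release an object: the backing store is freed according to how it was
 * obtained - page-backed objects give their pages back to the allocator,
 * linear-backed objects release their drm_mm node under linear_lock, and
 * imported dma-bufs are unmapped and handed back via drm_prime_gem_destroy().
 */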
void armada_gem_free_object(struct drm_gem_object *obj)
{
	struct armada_gem_object *dobj = drm_to_armada_gem(obj);
	struct armada_private *priv = drm_to_armada_dev(obj->dev);

	DRM_DEBUG_DRIVER("release obj %p\n", dobj);

	drm_gem_free_mmap_offset(&dobj->obj);

	might_lock(&priv->linear_lock);

	if (dobj->page) {
		/* page backed memory */
		unsigned int order = get_order(dobj->obj.size);
		__free_pages(dobj->page, order);
	} else if (dobj->linear) {
		/* linear backed memory */
		mutex_lock(&priv->linear_lock);
		drm_mm_remove_node(dobj->linear);
		mutex_unlock(&priv->linear_lock);
		kfree(dobj->linear);
		if (dobj->addr)
			iounmap(dobj->addr);
	}

	if (dobj->obj.import_attach) {
		/* We only ever display imported data */
		if (dobj->sgt)
			dma_buf_unmap_attachment_unlocked(dobj->obj.import_attach,
							  dobj->sgt, DMA_TO_DEVICE);
		drm_prime_gem_destroy(&dobj->obj, NULL);
	}

	drm_gem_object_release(&dobj->obj);

	kfree(dobj);
}

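/*
 * Attach a backing store to an object: small CPU-only allocations
 * (typically cursors) come straight from the page allocator, everything
 * else is carved out of the driver's linear memory pool and cleared
 * through a temporary write-combining mapping.
 */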
int
armada_gem_linear_back(struct drm_device *dev, struct armada_gem_object *obj)
{
	struct armada_private *priv = drm_to_armada_dev(dev);
	size_t size = obj->obj.size;

	if (obj->page || obj->linear)
		return 0;

	/*
	 * If it is a small allocation (typically cursor, which will
	 * be 32x64 or 64x32 ARGB pixels) try to get it from the system.
	 * Framebuffers will never be this small (our minimum size for
	 * framebuffers is larger than this anyway.)  Such objects are
	 * only accessed by the CPU so we don't need any special handling
	 * here.
	 */
	if (size <= 8192) {
		unsigned int order = get_order(size);
		struct page *p = alloc_pages(GFP_KERNEL, order);

		if (p) {
			obj->addr = page_address(p);
			obj->phys_addr = page_to_phys(p);
			obj->page = p;

			memset(obj->addr, 0, PAGE_ALIGN(size));
		}
	}

	/*
	 * We could grab something from DMA if it's enabled, but that
	 * involves building in a problem:
	 *
	 * GEM DMA helper interface uses dma_alloc_coherent(), which provides
	 * us with a CPU virtual address and a device address.
	 *
	 * The CPU virtual address may be either an address in the kernel
	 * direct mapped region (for example, as it would be on x86) or
	 * it may be remapped into another part of kernel memory space
	 * (eg, as it would be on ARM.)  This means virt_to_phys() on the
	 * returned virtual address is invalid depending on the architecture
	 * implementation.
	 *
	 * The device address may also not be a physical address; it may
	 * be that there is some kind of remapping between the device and
	 * system RAM, which makes the use of the device address also
	 * unsafe to re-use as a physical address.
	 *
	 * This makes DRM usage of dma_alloc_coherent() in a generic way
	 * at best very questionable and unsafe.
	 */

	/* Otherwise, grab it from our linear allocation */
	if (!obj->page) {
		struct drm_mm_node *node;
		unsigned align = min_t(unsigned, size, SZ_2M);
		void __iomem *ptr;
		int ret;

		node = kzalloc(sizeof(*node), GFP_KERNEL);
		if (!node)
			return -ENOSPC;

		mutex_lock(&priv->linear_lock);
		ret = drm_mm_insert_node_generic(&priv->linear, node,
						 size, align, 0, 0);
		mutex_unlock(&priv->linear_lock);
		if (ret) {
			kfree(node);
			return ret;
		}

		obj->linear = node;

		/* Ensure that the memory we're returning is cleared. */
		ptr = ioremap_wc(obj->linear->start, size);
		if (!ptr) {
			mutex_lock(&priv->linear_lock);
			drm_mm_remove_node(obj->linear);
			mutex_unlock(&priv->linear_lock);
			kfree(obj->linear);
			obj->linear = NULL;
			return -ENOMEM;
		}

		memset_io(ptr, 0, size);
		iounmap(ptr);

		obj->phys_addr = obj->linear->start;
		obj->dev_addr = obj->linear->start;
		obj->mapped = true;
	}

	DRM_DEBUG_DRIVER("obj %p phys %#llx dev %#llx\n", obj,
			 (unsigned long long)obj->phys_addr,
			 (unsigned long long)obj->dev_addr);

	return 0;
}

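/*
 * Page-backed objects already have a kernel virtual address from
 * page_address(); only linear-backed objects need an explicit
 * write-combining ioremap before the CPU can touch them.
 */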
void *
armada_gem_map_object(struct drm_device *dev, struct armada_gem_object *dobj)
{
	/* only linear objects need to be ioremap'd */
	if (!dobj->addr && dobj->linear)
		dobj->addr = ioremap_wc(dobj->phys_addr, dobj->obj.size);
	return dobj->addr;
}

static const struct drm_gem_object_funcs armada_gem_object_funcs = {
	.free = armada_gem_free_object,
	.export = armada_gem_prime_export,
	.vm_ops = &armada_gem_vm_ops,
};

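/*
 * "Private" objects have no shmem backing file; they are used for dumb
 * (linear-backed) buffers and for dma-buf imports, where the memory comes
 * from armada_gem_linear_back() or from the exporter respectively.
 */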
struct armada_gem_object *
armada_gem_alloc_private_object(struct drm_device *dev, size_t size)
{
	struct armada_gem_object *obj;

	size = roundup_gem_size(size);

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return NULL;

	obj->obj.funcs = &armada_gem_object_funcs;

	drm_gem_private_object_init(dev, &obj->obj, size);

	DRM_DEBUG_DRIVER("alloc private obj %p size %zu\n", obj, size);

	return obj;
}

static struct armada_gem_object *armada_gem_alloc_object(struct drm_device *dev,
	size_t size)
{
	struct armada_gem_object *obj;
	struct address_space *mapping;

	size = roundup_gem_size(size);

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return NULL;

	obj->obj.funcs = &armada_gem_object_funcs;

	if (drm_gem_object_init(dev, &obj->obj, size)) {
		kfree(obj);
		return NULL;
	}

	mapping = obj->obj.filp->f_mapping;
	mapping_set_gfp_mask(mapping, GFP_HIGHUSER | __GFP_RECLAIMABLE);

	DRM_DEBUG_DRIVER("alloc obj %p size %zu\n", obj, size);

	return obj;
}

/* Dumb alloc support */
int armada_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
	struct drm_mode_create_dumb *args)
{
	struct armada_gem_object *dobj;
	u32 handle;
	size_t size;
	int ret;

	args->pitch = armada_pitch(args->width, args->bpp);
	args->size = size = args->pitch * args->height;

	dobj = armada_gem_alloc_private_object(dev, size);
	if (dobj == NULL)
		return -ENOMEM;

	ret = armada_gem_linear_back(dev, dobj);
	if (ret)
		goto err;

	ret = drm_gem_handle_create(file, &dobj->obj, &handle);
	if (ret)
		goto err;

	args->handle = handle;

	/* drop reference from allocate - handle holds it now */
	DRM_DEBUG_DRIVER("obj %p size %zu handle %#x\n", dobj, size, handle);
 err:
	drm_gem_object_put(&dobj->obj);
	return ret;
}

/* Private driver gem ioctls */
int armada_gem_create_ioctl(struct drm_device *dev, void *data,
	struct drm_file *file)
{
	struct drm_armada_gem_create *args = data;
	struct armada_gem_object *dobj;
	size_t size;
	u32 handle;
	int ret;

	if (args->size == 0)
		return -ENOMEM;

	size = args->size;

	dobj = armada_gem_alloc_object(dev, size);
	if (dobj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, &dobj->obj, &handle);
	if (ret)
		goto err;

	args->handle = handle;

	/* drop reference from allocate - handle holds it now */
	DRM_DEBUG_DRIVER("obj %p size %zu handle %#x\n", dobj, size, handle);
 err:
	drm_gem_object_put(&dobj->obj);
	return ret;
}

/* Map a shmem-backed object into process memory space */
int armada_gem_mmap_ioctl(struct drm_device *dev, void *data,
	struct drm_file *file)
{
	struct drm_armada_gem_mmap *args = data;
	struct armada_gem_object *dobj;
	unsigned long addr;

	dobj = armada_gem_object_lookup(file, args->handle);
	if (dobj == NULL)
		return -ENOENT;

	if (!dobj->obj.filp) {
		drm_gem_object_put(&dobj->obj);
		return -EINVAL;
	}

	addr = vm_mmap(dobj->obj.filp, 0, args->size, PROT_READ | PROT_WRITE,
		       MAP_SHARED, args->offset);
	drm_gem_object_put(&dobj->obj);
	if (IS_ERR_VALUE(addr))
		return addr;

	args->addr = addr;

	return 0;
}

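/*
 * pwrite copies user data straight into the kernel mapping of a
 * page or linear backed object, then kicks the object's update callback
 * (if one was registered) so the hardware picks up the new contents.
 */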
int armada_gem_pwrite_ioctl(struct drm_device *dev, void *data,
	struct drm_file *file)
{
	struct drm_armada_gem_pwrite *args = data;
	struct armada_gem_object *dobj;
	char __user *ptr;
	int ret = 0;

	DRM_DEBUG_DRIVER("handle %u off %u size %u ptr 0x%llx\n",
		args->handle, args->offset, args->size, args->ptr);

	if (args->size == 0)
		return 0;

	ptr = (char __user *)(uintptr_t)args->ptr;

	if (!access_ok(ptr, args->size))
		return -EFAULT;

	if (fault_in_readable(ptr, args->size))
		return -EFAULT;

	dobj = armada_gem_object_lookup(file, args->handle);
	if (dobj == NULL)
		return -ENOENT;

	/* Must be a kernel-mapped object */
	if (!dobj->addr) {
		ret = -EINVAL;
		goto unref;
	}

	if (args->offset > dobj->obj.size ||
	    args->size > dobj->obj.size - args->offset) {
		DRM_ERROR("invalid size: object size %zu\n", dobj->obj.size);
		ret = -EINVAL;
		goto unref;
	}

	if (copy_from_user(dobj->addr + args->offset, ptr, args->size)) {
		ret = -EFAULT;
	} else if (dobj->update) {
		dobj->update(dobj->update_data);
		ret = 0;
	}

 unref:
	drm_gem_object_put(&dobj->obj);
	return ret;
}

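/*
 * dma-buf export: the exported scatterlist is built from whichever backing
 * store the object has - individual shmem pages, the single contiguous
 * page allocation, or the linear region (which has no struct pages and is
 * described purely by its DMA address and length).
 */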
/* Prime support */
static struct sg_table *
armada_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
	enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct armada_gem_object *dobj = drm_to_armada_gem(obj);
	struct scatterlist *sg;
	struct sg_table *sgt;
	int i;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;

	if (dobj->obj.filp) {
		struct address_space *mapping;
		int count;

		count = dobj->obj.size / PAGE_SIZE;
		if (sg_alloc_table(sgt, count, GFP_KERNEL))
			goto free_sgt;

		mapping = dobj->obj.filp->f_mapping;

		for_each_sgtable_sg(sgt, sg, i) {
			struct page *page;

			page = shmem_read_mapping_page(mapping, i);
			if (IS_ERR(page))
				goto release;

			sg_set_page(sg, page, PAGE_SIZE, 0);
		}

		if (dma_map_sgtable(attach->dev, sgt, dir, 0))
			goto release;
	} else if (dobj->page) {
		/* Single contiguous page */
		if (sg_alloc_table(sgt, 1, GFP_KERNEL))
			goto free_sgt;

		sg_set_page(sgt->sgl, dobj->page, dobj->obj.size, 0);

		if (dma_map_sgtable(attach->dev, sgt, dir, 0))
			goto free_table;
	} else if (dobj->linear) {
		/* Single contiguous physical region - no struct page */
		if (sg_alloc_table(sgt, 1, GFP_KERNEL))
			goto free_sgt;
		sg_dma_address(sgt->sgl) = dobj->dev_addr;
		sg_dma_len(sgt->sgl) = dobj->obj.size;
	} else {
		goto free_sgt;
	}
	return sgt;

 release:
	for_each_sgtable_sg(sgt, sg, i)
		if (sg_page(sg))
			put_page(sg_page(sg));
 free_table:
	sg_free_table(sgt);
 free_sgt:
	kfree(sgt);
	return NULL;
}

static void armada_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
	struct sg_table *sgt, enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct armada_gem_object *dobj = drm_to_armada_gem(obj);
	int i;

	if (!dobj->linear)
		dma_unmap_sgtable(attach->dev, sgt, dir, 0);

	if (dobj->obj.filp) {
		struct scatterlist *sg;

		for_each_sgtable_sg(sgt, sg, i)
			put_page(sg_page(sg));
	}

	sg_free_table(sgt);
	kfree(sgt);
}

static int
armada_gem_dmabuf_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
{
	return -EINVAL;
}

static const struct dma_buf_ops armada_gem_prime_dmabuf_ops = {
	.map_dma_buf	= armada_gem_prime_map_dma_buf,
	.unmap_dma_buf	= armada_gem_prime_unmap_dma_buf,
	.release	= drm_gem_dmabuf_release,
	.mmap		= armada_gem_dmabuf_mmap,
};

struct dma_buf *
armada_gem_prime_export(struct drm_gem_object *obj, int flags)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &armada_gem_prime_dmabuf_ops;
	exp_info.size = obj->size;
	exp_info.flags = O_RDWR;
	exp_info.priv = obj;

	return drm_gem_dmabuf_export(obj->dev, &exp_info);
}

struct drm_gem_object *
armada_gem_prime_import(struct drm_device *dev, struct dma_buf *buf)
{
	struct dma_buf_attachment *attach;
	struct armada_gem_object *dobj;

	if (buf->ops == &armada_gem_prime_dmabuf_ops) {
		struct drm_gem_object *obj = buf->priv;
		if (obj->dev == dev) {
			/*
			 * Importing our own dmabuf(s) increases the
			 * refcount on the gem object itself.
			 */
			drm_gem_object_get(obj);
			return obj;
		}
	}

	attach = dma_buf_attach(buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	dobj = armada_gem_alloc_private_object(dev, buf->size);
	if (!dobj) {
		dma_buf_detach(buf, attach);
		return ERR_PTR(-ENOMEM);
	}

	dobj->obj.import_attach = attach;
	get_dma_buf(buf);

	/*
	 * Don't call dma_buf_map_attachment() here - it maps the
	 * scatterlist immediately for DMA, and this is not always
	 * an appropriate thing to do.
	 */
	return &dobj->obj;
}

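/*
 * Actually map the imported buffer for DMA. The display hardware needs a
 * single contiguous region, so the import is rejected if the exporter
 * hands back more than one segment, or a segment smaller than the object.
 */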
int armada_gem_map_import(struct armada_gem_object *dobj)
{
	int ret;

	dobj->sgt = dma_buf_map_attachment_unlocked(dobj->obj.import_attach,
						    DMA_TO_DEVICE);
	if (IS_ERR(dobj->sgt)) {
		ret = PTR_ERR(dobj->sgt);
		dobj->sgt = NULL;
		DRM_ERROR("dma_buf_map_attachment() error: %d\n", ret);
		return ret;
	}
	if (dobj->sgt->nents > 1) {
		DRM_ERROR("dma_buf_map_attachment() returned an (unsupported) scattered list\n");
		return -EINVAL;
	}
	if (sg_dma_len(dobj->sgt->sgl) < dobj->obj.size) {
		DRM_ERROR("dma_buf_map_attachment() returned a small buffer\n");
		return -EINVAL;
	}
	dobj->dev_addr = sg_dma_address(dobj->sgt->sgl);
	dobj->mapped = true;
	return 0;
}