1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (C) 2011 Texas Instruments Incorporated - https://www.ti.com/
7 #include <linux/dma-mapping.h>
8 #include <linux/seq_file.h>
9 #include <linux/shmem_fs.h>
10 #include <linux/spinlock.h>
11 #include <linux/pfn_t.h>
12 #include <linux/vmalloc.h>
14 #include <drm/drm_prime.h>
15 #include <drm/drm_vma_manager.h>
18 #include "omap_dmm_tiler.h"
21 * GEM buffer object implementation.
24 /* note: we use upper 8 bits of flags for driver-internal flags: */
25 #define OMAP_BO_MEM_DMA_API 0x01000000 /* memory allocated with the dma_alloc_* API */
26 #define OMAP_BO_MEM_SHMEM 0x02000000 /* memory allocated through shmem backing */
27 #define OMAP_BO_MEM_DMABUF 0x08000000 /* memory imported from a dmabuf */
29 struct omap_gem_object {
30 struct drm_gem_object base;
32 struct list_head mm_list;
36 /** width/height for tiled formats (rounded up to slot boundaries) */
39 /** roll applied when mapping to DMM */
42 /** protects pin_cnt, block, pages, dma_addrs and vaddr */
46 * dma_addr contains the buffer DMA address. It is valid for
48 * - buffers allocated through the DMA mapping API (with the
49 * OMAP_BO_MEM_DMA_API flag set)
51 * - buffers imported from dmabuf (with the OMAP_BO_MEM_DMABUF flag set)
52 * if they are physically contiguous
54 * - buffers mapped through the TILER when pin_cnt is not zero, in which
55 * case the DMA address points to the TILER aperture
57 * Physically contiguous buffers have their DMA address equal to the
58 * physical address as we don't remap those buffers through the TILER.
60 * Buffers mapped to the TILER have their DMA address pointing to the
61 * TILER aperture. As TILER mappings are refcounted (through pin_cnt)
62 * the DMA address must be accessed through omap_gem_pin() to ensure
63 * that the mapping won't disappear unexpectedly. References must be
64 * released with omap_gem_unpin().
74 * If the buffer has been imported from a dmabuf, the OMAP_BO_MEM_DMABUF flag
75 * is set and the sgt field is valid.
80 * tiler block used when buffer is remapped in DMM/TILER.
82 struct tiler_block *block;
85 * Array of backing pages, if allocated. Note that pages are never
86 * allocated for buffers originally allocated from contiguous memory
90 /** addresses corresponding to pages in above array */
91 dma_addr_t *dma_addrs;
94 * Virtual address, if mapped.
99 #define to_omap_bo(x) container_of(x, struct omap_gem_object, base)
101 /* To deal with userspace mmap'ings of 2d tiled buffers, which (a) are
102 * not necessarily pinned in TILER all the time, and (b) when they are
103 * they are not necessarily page aligned, we reserve one or more small
104 * regions in each of the 2d containers to use as a user-GART where we
105 * can create a second page-aligned mapping of parts of the buffer
106 * being accessed from userspace.
108 * Note that we could optimize slightly when we know that multiple
109 * tiler containers are backed by the same PAT.. but I'll leave that for later.
112 #define NUM_USERGART_ENTRIES 2
113 struct omap_drm_usergart_entry {
114 struct tiler_block *block; /* the reserved tiler block */
116 struct drm_gem_object *obj; /* the current pinned obj */
117 pgoff_t obj_pgoff; /* page offset of obj currently mapped in */
121 struct omap_drm_usergart {
122 struct omap_drm_usergart_entry entry[NUM_USERGART_ENTRIES];
123 int height; /* height in rows */
124 int height_shift; /* ilog2(height in rows) */
125 int slot_shift; /* ilog2(width per slot) */
126 int stride_pfn; /* stride in pages */
127 int last; /* index of last used entry */
130 /* -----------------------------------------------------------------------------
134 /** get mmap offset */
135 u64 omap_gem_mmap_offset(struct drm_gem_object *obj)
137 struct drm_device *dev = obj->dev;
141 /* Make it mmapable */
142 size = omap_gem_mmap_size(obj);
143 ret = drm_gem_create_mmap_offset_size(obj, size);
145 dev_err(dev->dev, "could not allocate mmap offset\n");
149 return drm_vma_node_offset_addr(&obj->vma_node);
152 static bool omap_gem_sgt_is_contiguous(struct sg_table *sgt, size_t size)
154 return drm_prime_get_contiguous_size(sgt) >= size;
157 static bool omap_gem_is_contiguous(struct omap_gem_object *omap_obj)
159 if (omap_obj->flags & OMAP_BO_MEM_DMA_API)
162 if ((omap_obj->flags & OMAP_BO_MEM_DMABUF) &&
163 omap_gem_sgt_is_contiguous(omap_obj->sgt, omap_obj->base.size))
169 /* -----------------------------------------------------------------------------
173 static void omap_gem_evict_entry(struct drm_gem_object *obj,
174 enum tiler_fmt fmt, struct omap_drm_usergart_entry *entry)
176 struct omap_gem_object *omap_obj = to_omap_bo(obj);
177 struct omap_drm_private *priv = obj->dev->dev_private;
178 int n = priv->usergart[fmt].height;
179 size_t size = PAGE_SIZE * n;
180 loff_t off = omap_gem_mmap_offset(obj) +
181 (entry->obj_pgoff << PAGE_SHIFT);
182 const int m = DIV_ROUND_UP(omap_obj->width << fmt, PAGE_SIZE);
186 /* if stride is larger than PAGE_SIZE then sparse mapping: */
187 for (i = n; i > 0; i--) {
188 unmap_mapping_range(obj->dev->anon_inode->i_mapping,
190 off += PAGE_SIZE * m;
193 unmap_mapping_range(obj->dev->anon_inode->i_mapping,
200 /* Evict a buffer from usergart, if it is mapped there */
201 static void omap_gem_evict(struct drm_gem_object *obj)
203 struct omap_gem_object *omap_obj = to_omap_bo(obj);
204 struct omap_drm_private *priv = obj->dev->dev_private;
206 if (omap_obj->flags & OMAP_BO_TILED_MASK) {
207 enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
210 for (i = 0; i < NUM_USERGART_ENTRIES; i++) {
211 struct omap_drm_usergart_entry *entry =
212 &priv->usergart[fmt].entry[i];
214 if (entry->obj == obj)
215 omap_gem_evict_entry(obj, fmt, entry);
220 /* -----------------------------------------------------------------------------
225 * Ensure backing pages are allocated. Must be called with the omap_obj.lock held.
228 static int omap_gem_attach_pages(struct drm_gem_object *obj)
230 struct drm_device *dev = obj->dev;
231 struct omap_gem_object *omap_obj = to_omap_bo(obj);
233 int npages = obj->size >> PAGE_SHIFT;
237 lockdep_assert_held(&omap_obj->lock);
240 * If not using shmem (in which case backing pages don't need to be
241 * allocated) or if pages are already allocated we're done.
243 if (!(omap_obj->flags & OMAP_BO_MEM_SHMEM) || omap_obj->pages)
246 pages = drm_gem_get_pages(obj);
248 dev_err(obj->dev->dev, "could not get pages: %ld\n", PTR_ERR(pages));
249 return PTR_ERR(pages);
252 /* for non-cached buffers, ensure the new pages are clean because
253 * DSS, GPU, etc. are not cache coherent:
255 if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
256 addrs = kmalloc_array(npages, sizeof(*addrs), GFP_KERNEL);
262 for (i = 0; i < npages; i++) {
263 addrs[i] = dma_map_page(dev->dev, pages[i],
264 0, PAGE_SIZE, DMA_TO_DEVICE);
266 if (dma_mapping_error(dev->dev, addrs[i])) {
268 "%s: failed to map page\n", __func__);
270 for (i = i - 1; i >= 0; --i) {
271 dma_unmap_page(dev->dev, addrs[i],
272 PAGE_SIZE, DMA_TO_DEVICE);
280 addrs = kcalloc(npages, sizeof(*addrs), GFP_KERNEL);
287 omap_obj->dma_addrs = addrs;
288 omap_obj->pages = pages;
295 drm_gem_put_pages(obj, pages, true, false);
300 /* Release backing pages. Must be called with the omap_obj.lock held. */
301 static void omap_gem_detach_pages(struct drm_gem_object *obj)
303 struct omap_gem_object *omap_obj = to_omap_bo(obj);
304 unsigned int npages = obj->size >> PAGE_SHIFT;
307 lockdep_assert_held(&omap_obj->lock);
309 for (i = 0; i < npages; i++) {
310 if (omap_obj->dma_addrs[i])
311 dma_unmap_page(obj->dev->dev, omap_obj->dma_addrs[i],
312 PAGE_SIZE, DMA_TO_DEVICE);
315 kfree(omap_obj->dma_addrs);
316 omap_obj->dma_addrs = NULL;
318 drm_gem_put_pages(obj, omap_obj->pages, true, false);
319 omap_obj->pages = NULL;
322 /* get buffer flags */
323 u32 omap_gem_flags(struct drm_gem_object *obj)
325 return to_omap_bo(obj)->flags;
329 size_t omap_gem_mmap_size(struct drm_gem_object *obj)
331 struct omap_gem_object *omap_obj = to_omap_bo(obj);
332 size_t size = obj->size;
334 if (omap_obj->flags & OMAP_BO_TILED_MASK) {
335 /* for tiled buffers, the virtual size has stride rounded up
336 * to 4kb.. (to hide the fact that row n+1 might start 16kb or
337 * 32kb later!). But we don't back the entire buffer with
338 * pages, only the valid picture part.. so we need to adjust for
339 * this in the size used to mmap and generate the mmap offset.
341 size = tiler_vsize(gem2fmt(omap_obj->flags),
342 omap_obj->width, omap_obj->height);
348 /* -----------------------------------------------------------------------------
352 /* Normal handling for the case of faulting in non-tiled buffers */
353 static vm_fault_t omap_gem_fault_1d(struct drm_gem_object *obj,
354 struct vm_area_struct *vma, struct vm_fault *vmf)
356 struct omap_gem_object *omap_obj = to_omap_bo(obj);
360 /* We don't use vmf->pgoff since that has the fake offset: */
361 pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
363 if (omap_obj->pages) {
364 omap_gem_cpu_sync_page(obj, pgoff);
365 pfn = page_to_pfn(omap_obj->pages[pgoff]);
367 BUG_ON(!omap_gem_is_contiguous(omap_obj));
368 pfn = (omap_obj->dma_addr >> PAGE_SHIFT) + pgoff;
371 VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
372 pfn, pfn << PAGE_SHIFT);
374 return vmf_insert_mixed(vma, vmf->address,
375 __pfn_to_pfn_t(pfn, PFN_DEV));
378 /* Special handling for the case of faulting in 2d tiled buffers */
379 static vm_fault_t omap_gem_fault_2d(struct drm_gem_object *obj,
380 struct vm_area_struct *vma, struct vm_fault *vmf)
382 struct omap_gem_object *omap_obj = to_omap_bo(obj);
383 struct omap_drm_private *priv = obj->dev->dev_private;
384 struct omap_drm_usergart_entry *entry;
385 enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
386 struct page *pages[64]; /* XXX is this too much to have on stack? */
388 pgoff_t pgoff, base_pgoff;
391 vm_fault_t ret = VM_FAULT_NOPAGE;
394 * Note the height of the slot is also equal to the number of pages
395 * that need to be mapped in to fill a 4kb-wide CPU page. If the slot
396 * height is 64, then 64 pages fill a 4kb-wide by 64-row region.
398 const int n = priv->usergart[fmt].height;
399 const int n_shift = priv->usergart[fmt].height_shift;
402 * If buffer width in bytes > PAGE_SIZE then the virtual stride is
403 * rounded up to the next multiple of PAGE_SIZE.. this needs to be taken
404 * into account in some of the math, so figure out the virtual stride
407 const int m = DIV_ROUND_UP(omap_obj->width << fmt, PAGE_SIZE);
409 /* We don't use vmf->pgoff since that has the fake offset: */
410 pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
413 * Actual address we start mapping at is rounded down to previous slot
414 * boundary in the y direction:
416 base_pgoff = round_down(pgoff, m << n_shift);
418 /* figure out buffer width in slots */
419 slots = omap_obj->width >> priv->usergart[fmt].slot_shift;
421 vaddr = vmf->address - ((pgoff - base_pgoff) << PAGE_SHIFT);
423 entry = &priv->usergart[fmt].entry[priv->usergart[fmt].last];
425 /* evict previous buffer using this usergart entry, if any: */
427 omap_gem_evict_entry(entry->obj, fmt, entry);
430 entry->obj_pgoff = base_pgoff;
432 /* now convert base_pgoff to phys offset from virt offset: */
433 base_pgoff = (base_pgoff >> n_shift) * slots;
435 /* for wider-than-4k buffers, figure out which part of the slot-row we want: */
438 entry->obj_pgoff += off;
440 slots = min(slots - (off << n_shift), n);
441 base_pgoff += off << n_shift;
442 vaddr += off << PAGE_SHIFT;
446 * Map in pages. Beyond the valid pixel part of the buffer, we set
447 * pages[i] to NULL to get a dummy page mapped in.. if someone
448 * reads/writes it they will get random/undefined content, but at
449 * least it won't be corrupting whatever other random page used to
450 * be mapped in, or other undefined behavior.
452 memcpy(pages, &omap_obj->pages[base_pgoff],
453 sizeof(struct page *) * slots);
454 memset(pages + slots, 0,
455 sizeof(struct page *) * (n - slots));
457 err = tiler_pin(entry->block, pages, ARRAY_SIZE(pages), 0, true);
459 ret = vmf_error(err);
460 dev_err(obj->dev->dev, "failed to pin: %d\n", err);
464 pfn = entry->dma_addr >> PAGE_SHIFT;
466 VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
467 pfn, pfn << PAGE_SHIFT);
469 for (i = n; i > 0; i--) {
470 ret = vmf_insert_mixed(vma,
471 vaddr, __pfn_to_pfn_t(pfn, PFN_DEV));
472 if (ret & VM_FAULT_ERROR)
474 pfn += priv->usergart[fmt].stride_pfn;
475 vaddr += PAGE_SIZE * m;
478 /* simple round-robin: */
479 priv->usergart[fmt].last = (priv->usergart[fmt].last + 1)
480 % NUM_USERGART_ENTRIES;
486 * omap_gem_fault - pagefault handler for GEM objects
489 * Invoked when a fault occurs on an mmap of a GEM managed area. GEM
490 * does most of the work for us including the actual map/unmap calls
491 * but we need to do the actual page work.
493 * The VMA was set up by GEM. In doing so it also ensured that the
494 * vma->vm_private_data points to the GEM object that is backing this mapping.
497 static vm_fault_t omap_gem_fault(struct vm_fault *vmf)
499 struct vm_area_struct *vma = vmf->vma;
500 struct drm_gem_object *obj = vma->vm_private_data;
501 struct omap_gem_object *omap_obj = to_omap_bo(obj);
505 /* Make sure we don't parallel update on a fault, nor move or remove
506 * something from beneath our feet
508 mutex_lock(&omap_obj->lock);
510 /* if a shmem backed object, make sure we have pages attached now */
511 err = omap_gem_attach_pages(obj);
513 ret = vmf_error(err);
517 /* Where should we do the corresponding put_pages()? We are mapping
518 * the original pages, rather than going through a GART, so we can't rely
519 * on eviction to trigger this. But munmap() or dropping all mappings should
520 * probably trigger put_pages().
523 if (omap_obj->flags & OMAP_BO_TILED_MASK)
524 ret = omap_gem_fault_2d(obj, vma, vmf);
526 ret = omap_gem_fault_1d(obj, vma, vmf);
530 mutex_unlock(&omap_obj->lock);
534 static int omap_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
536 struct omap_gem_object *omap_obj = to_omap_bo(obj);
538 vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP | VM_IO | VM_MIXEDMAP);
540 if (omap_obj->flags & OMAP_BO_WC) {
541 vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
542 } else if (omap_obj->flags & OMAP_BO_UNCACHED) {
543 vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
546 * We do have some private objects, at least for scanout buffers
547 * on hardware without DMM/TILER. But these are allocated write-combine.
550 if (WARN_ON(!obj->filp))
554 * Shunt off cached objs to shmem file so they have their own
555 * address_space (so unmap_mapping_range does what we want,
556 * in particular in the case of mmap'd dmabufs)
558 vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node);
559 vma_set_file(vma, obj->filp);
561 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
564 vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
569 /* -----------------------------------------------------------------------------
574 * omap_gem_dumb_create - create a dumb buffer
575 * @file: our client file
577 * @args: the requested arguments copied from userspace
579 * Allocate a buffer suitable for use for a frame buffer of the
580 * form described by user space. Give userspace a handle by which to access the buffer.
583 int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
584 struct drm_mode_create_dumb *args)
586 union omap_gem_size gsize;
588 args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
590 args->size = PAGE_ALIGN(args->pitch * args->height);
592 gsize = (union omap_gem_size){
596 return omap_gem_new_handle(dev, file, gsize,
597 OMAP_BO_SCANOUT | OMAP_BO_WC, &args->handle);
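/*
 * Worked example (illustrative arithmetic only, values not taken from this
 * driver): for width = 1920, height = 1080 and bpp = 32 the code above
 * computes pitch = DIV_ROUND_UP(1920 * 32, 8) = 7680 bytes and
 * size = PAGE_ALIGN(7680 * 1080) = 8294400 bytes, i.e. 2025 pages.
 */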
601 * omap_gem_dumb_map_offset - create an offset for a dumb buffer
602 * @file: our drm client file
604 * @handle: GEM handle to the object (from dumb_create)
605 * @offset: memory map offset placeholder
607 * Do the necessary setup to allow the mapping of the frame buffer
608 * into user memory. We don't have to do much here at the moment.
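 *
 * Typical userspace flow (illustrative, standard DRM dumb-buffer usage):
 * DRM_IOCTL_MODE_CREATE_DUMB -> DRM_IOCTL_MODE_MAP_DUMB (which lands here)
 * -> mmap() on the DRM fd using the returned fake offset.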
610 int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
611 u32 handle, u64 *offset)
613 struct drm_gem_object *obj;
616 /* GEM does all our handle to object mapping */
617 obj = drm_gem_object_lookup(file, handle);
623 *offset = omap_gem_mmap_offset(obj);
625 drm_gem_object_put(obj);
631 #ifdef CONFIG_DRM_FBDEV_EMULATION
632 /* Set scrolling position. This allows us to implement fast scrolling for the console.
635 * Call only from non-atomic contexts.
637 int omap_gem_roll(struct drm_gem_object *obj, u32 roll)
639 struct omap_gem_object *omap_obj = to_omap_bo(obj);
640 u32 npages = obj->size >> PAGE_SHIFT;
644 dev_err(obj->dev->dev, "invalid roll: %d\n", roll);
648 omap_obj->roll = roll;
650 mutex_lock(&omap_obj->lock);
652 /* if we aren't mapped yet, we don't need to do anything */
653 if (omap_obj->block) {
654 ret = omap_gem_attach_pages(obj);
658 ret = tiler_pin(omap_obj->block, omap_obj->pages, npages,
661 dev_err(obj->dev->dev, "could not repin: %d\n", ret);
665 mutex_unlock(&omap_obj->lock);
671 /* -----------------------------------------------------------------------------
672 * Memory Management & DMA Sync
676 * shmem buffers that are mapped cached are not coherent.
678 * We keep track of dirty pages using page faulting to perform cache management.
679 * When a page is mapped to the CPU in read/write mode the device can't access
680 * it and omap_obj->dma_addrs[i] is NULL. When a page is mapped to the device
681 * the omap_obj->dma_addrs[i] is set to the DMA address, and the page is
682 * unmapped from the CPU.
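 *
 * A minimal sketch of the intended flow (illustrative only, not a verbatim
 * call sequence from this file):
 *
 *	omap_gem_cpu_sync_page(obj, pgoff);	// hand the page back to the CPU
 *	// ... CPU reads/writes omap_obj->pages[pgoff] ...
 *	omap_gem_dma_sync_buffer(obj, DMA_TO_DEVICE);	// re-map dirty pages for the device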
684 static inline bool omap_gem_is_cached_coherent(struct drm_gem_object *obj)
686 struct omap_gem_object *omap_obj = to_omap_bo(obj);
688 return !((omap_obj->flags & OMAP_BO_MEM_SHMEM) &&
689 ((omap_obj->flags & OMAP_BO_CACHE_MASK) == OMAP_BO_CACHED));
692 /* Sync the buffer for CPU access. Note that pages should already be
693 * attached, i.e. via omap_gem_get_pages().
695 void omap_gem_cpu_sync_page(struct drm_gem_object *obj, int pgoff)
697 struct drm_device *dev = obj->dev;
698 struct omap_gem_object *omap_obj = to_omap_bo(obj);
700 if (omap_gem_is_cached_coherent(obj))
703 if (omap_obj->dma_addrs[pgoff]) {
704 dma_unmap_page(dev->dev, omap_obj->dma_addrs[pgoff],
705 PAGE_SIZE, DMA_TO_DEVICE);
706 omap_obj->dma_addrs[pgoff] = 0;
710 /* sync the buffer for DMA access */
711 void omap_gem_dma_sync_buffer(struct drm_gem_object *obj,
712 enum dma_data_direction dir)
714 struct drm_device *dev = obj->dev;
715 struct omap_gem_object *omap_obj = to_omap_bo(obj);
716 int i, npages = obj->size >> PAGE_SHIFT;
717 struct page **pages = omap_obj->pages;
720 if (omap_gem_is_cached_coherent(obj))
723 for (i = 0; i < npages; i++) {
724 if (!omap_obj->dma_addrs[i]) {
727 addr = dma_map_page(dev->dev, pages[i], 0,
729 if (dma_mapping_error(dev->dev, addr)) {
730 dev_warn(dev->dev, "%s: failed to map page\n",
736 omap_obj->dma_addrs[i] = addr;
741 unmap_mapping_range(obj->filp->f_mapping, 0,
742 omap_gem_mmap_size(obj), 1);
746 static int omap_gem_pin_tiler(struct drm_gem_object *obj)
748 struct omap_gem_object *omap_obj = to_omap_bo(obj);
749 u32 npages = obj->size >> PAGE_SHIFT;
750 enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
751 struct tiler_block *block;
754 BUG_ON(omap_obj->block);
756 if (omap_obj->flags & OMAP_BO_TILED_MASK) {
757 block = tiler_reserve_2d(fmt, omap_obj->width, omap_obj->height,
760 block = tiler_reserve_1d(obj->size);
764 ret = PTR_ERR(block);
765 dev_err(obj->dev->dev, "could not remap: %d (%d)\n", ret, fmt);
769 /* TODO: enable async refill.. */
770 ret = tiler_pin(block, omap_obj->pages, npages, omap_obj->roll, true);
772 tiler_release(block);
773 dev_err(obj->dev->dev, "could not pin: %d\n", ret);
777 omap_obj->dma_addr = tiler_ssptr(block);
778 omap_obj->block = block;
780 DBG("got dma address: %pad", &omap_obj->dma_addr);
787 * omap_gem_pin() - Pin a GEM object in memory
788 * @obj: the GEM object
789 * @dma_addr: the DMA address
791 * Pin the given GEM object in memory and fill the dma_addr pointer with the
792 * object's DMA address. If the buffer is not physically contiguous it will be
793 * remapped through the TILER to provide a contiguous view.
795 * Pins are reference-counted, calling this function multiple times is allowed
796 * as long as the corresponding omap_gem_unpin() calls are balanced.
798 * Return 0 on success or a negative error code otherwise.
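 *
 * A minimal usage sketch (illustrative only):
 *
 *	dma_addr_t paddr;
 *
 *	if (!omap_gem_pin(obj, &paddr)) {
 *		// ... program paddr into the display or DMA engine ...
 *		omap_gem_unpin(obj);
 *	}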
800 int omap_gem_pin(struct drm_gem_object *obj, dma_addr_t *dma_addr)
802 struct omap_drm_private *priv = obj->dev->dev_private;
803 struct omap_gem_object *omap_obj = to_omap_bo(obj);
806 mutex_lock(&omap_obj->lock);
808 if (!omap_gem_is_contiguous(omap_obj)) {
809 if (refcount_read(&omap_obj->pin_cnt) == 0) {
811 refcount_set(&omap_obj->pin_cnt, 1);
813 ret = omap_gem_attach_pages(obj);
817 if (omap_obj->flags & OMAP_BO_SCANOUT) {
819 ret = omap_gem_pin_tiler(obj);
825 refcount_inc(&omap_obj->pin_cnt);
830 *dma_addr = omap_obj->dma_addr;
833 mutex_unlock(&omap_obj->lock);
839 * omap_gem_unpin_locked() - Unpin a GEM object from memory
840 * @obj: the GEM object
842 * omap_gem_unpin() without locking.
844 static void omap_gem_unpin_locked(struct drm_gem_object *obj)
846 struct omap_drm_private *priv = obj->dev->dev_private;
847 struct omap_gem_object *omap_obj = to_omap_bo(obj);
850 if (omap_gem_is_contiguous(omap_obj))
853 if (refcount_dec_and_test(&omap_obj->pin_cnt)) {
855 sg_free_table(omap_obj->sgt);
856 kfree(omap_obj->sgt);
857 omap_obj->sgt = NULL;
859 if (!(omap_obj->flags & OMAP_BO_SCANOUT))
862 ret = tiler_unpin(omap_obj->block);
864 dev_err(obj->dev->dev,
865 "could not unpin pages: %d\n", ret);
867 ret = tiler_release(omap_obj->block);
869 dev_err(obj->dev->dev,
870 "could not release unmap: %d\n", ret);
872 omap_obj->dma_addr = 0;
873 omap_obj->block = NULL;
879 * omap_gem_unpin() - Unpin a GEM object from memory
880 * @obj: the GEM object
882 * Unpin the given GEM object previously pinned with omap_gem_pin(). Pins are
883 * reference-counted, the actual unpin will only be performed when the number
884 * of calls to this function matches the number of calls to omap_gem_pin().
886 void omap_gem_unpin(struct drm_gem_object *obj)
888 struct omap_gem_object *omap_obj = to_omap_bo(obj);
890 mutex_lock(&omap_obj->lock);
891 omap_gem_unpin_locked(obj);
892 mutex_unlock(&omap_obj->lock);
895 /* Get rotated scanout address (only valid if already pinned), at the
896 * specified orientation and x,y offset from top-left corner of buffer
897 * (only valid for tiled 2d buffers)
899 int omap_gem_rotated_dma_addr(struct drm_gem_object *obj, u32 orient,
900 int x, int y, dma_addr_t *dma_addr)
902 struct omap_gem_object *omap_obj = to_omap_bo(obj);
905 mutex_lock(&omap_obj->lock);
907 if ((refcount_read(&omap_obj->pin_cnt) > 0) && omap_obj->block &&
908 (omap_obj->flags & OMAP_BO_TILED_MASK)) {
909 *dma_addr = tiler_tsptr(omap_obj->block, orient, x, y);
913 mutex_unlock(&omap_obj->lock);
918 /* Get tiler stride for the buffer (only valid for 2d tiled buffers) */
919 int omap_gem_tiled_stride(struct drm_gem_object *obj, u32 orient)
921 struct omap_gem_object *omap_obj = to_omap_bo(obj);
923 if (omap_obj->flags & OMAP_BO_TILED_MASK)
924 ret = tiler_stride(gem2fmt(omap_obj->flags), orient);
928 /* if !remap, and we don't have pages backing, then fail, rather than
929 * increasing the pin count (which we don't really do yet anyway,
930 * because we don't support swapping pages back out). And 'remap'
931 * might not be quite the right name, but I wanted to keep it working
932 * similarly to omap_gem_pin(). Note though that the mutex is not
933 * acquired if !remap (because this can be called in atomic context),
934 * but probably omap_gem_unpin() should be changed to work in the
935 * same way. If !remap, a matching omap_gem_put_pages() call is not
936 * required (and should not be made).
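 *
 * Illustrative usage sketch (assuming the 'remap' boolean parameter referred
 * to above is the third argument):
 *
 *	struct page **pages;
 *
 *	if (!omap_gem_get_pages(obj, &pages, true)) {
 *		// ... access the backing pages ...
 *		omap_gem_put_pages(obj);
 *	}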
938 int omap_gem_get_pages(struct drm_gem_object *obj, struct page ***pages,
941 struct omap_gem_object *omap_obj = to_omap_bo(obj);
944 mutex_lock(&omap_obj->lock);
947 ret = omap_gem_attach_pages(obj);
952 if (!omap_obj->pages) {
957 *pages = omap_obj->pages;
960 mutex_unlock(&omap_obj->lock);
965 /* release pages when DMA no longer being performed */
966 int omap_gem_put_pages(struct drm_gem_object *obj)
968 /* do something here if we dynamically attach/detach pages.. at
969 * least they would no longer need to be pinned if everyone has
970 * released the pages..
975 struct sg_table *omap_gem_get_sg(struct drm_gem_object *obj,
976 enum dma_data_direction dir)
978 struct omap_gem_object *omap_obj = to_omap_bo(obj);
980 struct sg_table *sgt;
981 struct scatterlist *sg;
982 unsigned int count, len, stride, i;
985 ret = omap_gem_pin(obj, &addr);
989 mutex_lock(&omap_obj->lock);
995 sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
1002 if (omap_obj->flags & OMAP_BO_TILED_MASK) {
1003 enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
1005 len = omap_obj->width << (int)fmt;
1006 count = omap_obj->height;
1007 stride = tiler_stride(fmt, 0);
1014 count = obj->size >> PAGE_SHIFT;
1017 ret = sg_alloc_table(sgt, count, GFP_KERNEL);
1021 /* this must be after omap_gem_pin() to ensure we have pages attached */
1022 omap_gem_dma_sync_buffer(obj, dir);
1025 for_each_sg(sgt->sgl, sg, count, i) {
1026 sg_set_page(sg, pfn_to_page(__phys_to_pfn(addr)),
1027 len, offset_in_page(addr));
1028 sg_dma_address(sg) = addr;
1029 sg_dma_len(sg) = len;
1034 for_each_sg(sgt->sgl, sg, count, i) {
1035 sg_set_page(sg, omap_obj->pages[i], PAGE_SIZE, 0);
1036 sg_dma_address(sg) = omap_obj->dma_addrs[i];
1037 sg_dma_len(sg) = PAGE_SIZE;
1041 omap_obj->sgt = sgt;
1043 mutex_unlock(&omap_obj->lock);
1049 mutex_unlock(&omap_obj->lock);
1050 omap_gem_unpin(obj);
1051 return ERR_PTR(ret);
1054 void omap_gem_put_sg(struct drm_gem_object *obj, struct sg_table *sgt)
1056 struct omap_gem_object *omap_obj = to_omap_bo(obj);
1058 if (WARN_ON(omap_obj->sgt != sgt))
1061 omap_gem_unpin(obj);
1064 #ifdef CONFIG_DRM_FBDEV_EMULATION
1066 * Get kernel virtual address for CPU access.. this more or less only
1067 * exists for omap_fbdev.
1069 void *omap_gem_vaddr(struct drm_gem_object *obj)
1071 struct omap_gem_object *omap_obj = to_omap_bo(obj);
1075 mutex_lock(&omap_obj->lock);
1077 if (!omap_obj->vaddr) {
1078 ret = omap_gem_attach_pages(obj);
1080 vaddr = ERR_PTR(ret);
1084 omap_obj->vaddr = vmap(omap_obj->pages, obj->size >> PAGE_SHIFT,
1085 VM_MAP, pgprot_writecombine(PAGE_KERNEL));
1088 vaddr = omap_obj->vaddr;
1091 mutex_unlock(&omap_obj->lock);
1096 /* -----------------------------------------------------------------------------
1101 /* re-pin objects in DMM in resume path: */
1102 int omap_gem_resume(struct drm_device *dev)
1104 struct omap_drm_private *priv = dev->dev_private;
1105 struct omap_gem_object *omap_obj;
1108 mutex_lock(&priv->list_lock);
1109 list_for_each_entry(omap_obj, &priv->obj_list, mm_list) {
1110 if (omap_obj->block) {
1111 struct drm_gem_object *obj = &omap_obj->base;
1112 u32 npages = obj->size >> PAGE_SHIFT;
1114 WARN_ON(!omap_obj->pages); /* this can't happen */
1115 ret = tiler_pin(omap_obj->block,
1116 omap_obj->pages, npages,
1117 omap_obj->roll, true);
1119 dev_err(dev->dev, "could not repin: %d\n", ret);
1126 mutex_unlock(&priv->list_lock);
1131 /* -----------------------------------------------------------------------------
1135 #ifdef CONFIG_DEBUG_FS
1136 void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
1138 struct omap_gem_object *omap_obj = to_omap_bo(obj);
1141 off = drm_vma_node_start(&obj->vma_node);
1143 mutex_lock(&omap_obj->lock);
1145 seq_printf(m, "%08x: %2d (%2d) %08llx %pad (%2d) %p %4d",
1146 omap_obj->flags, obj->name, kref_read(&obj->refcount),
1147 off, &omap_obj->dma_addr,
1148 refcount_read(&omap_obj->pin_cnt),
1149 omap_obj->vaddr, omap_obj->roll);
1151 if (omap_obj->flags & OMAP_BO_TILED_MASK) {
1152 seq_printf(m, " %dx%d", omap_obj->width, omap_obj->height);
1153 if (omap_obj->block) {
1154 struct tcm_area *area = &omap_obj->block->area;
1155 seq_printf(m, " (%dx%d, %dx%d)",
1156 area->p0.x, area->p0.y,
1157 area->p1.x, area->p1.y);
1160 seq_printf(m, " %zu", obj->size);
1163 mutex_unlock(&omap_obj->lock);
1165 seq_printf(m, "\n");
1168 void omap_gem_describe_objects(struct list_head *list, struct seq_file *m)
1170 struct omap_gem_object *omap_obj;
1174 list_for_each_entry(omap_obj, list, mm_list) {
1175 struct drm_gem_object *obj = &omap_obj->base;
1177 omap_gem_describe(obj, m);
1182 seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
1186 /* -----------------------------------------------------------------------------
1187 * Constructor & Destructor
1190 static void omap_gem_free_object(struct drm_gem_object *obj)
1192 struct drm_device *dev = obj->dev;
1193 struct omap_drm_private *priv = dev->dev_private;
1194 struct omap_gem_object *omap_obj = to_omap_bo(obj);
1196 omap_gem_evict(obj);
1198 mutex_lock(&priv->list_lock);
1199 list_del(&omap_obj->mm_list);
1200 mutex_unlock(&priv->list_lock);
1203 * We own the sole reference to the object at this point, but to keep
1204 * lockdep happy, we must still take the omap_obj_lock to call
1205 * omap_gem_detach_pages(). This should hardly make any difference as
1206 * there can't be any lock contention.
1208 mutex_lock(&omap_obj->lock);
1210 /* The object should not be pinned. */
1211 WARN_ON(refcount_read(&omap_obj->pin_cnt) > 0);
1213 if (omap_obj->pages) {
1214 if (omap_obj->flags & OMAP_BO_MEM_DMABUF)
1215 kfree(omap_obj->pages);
1217 omap_gem_detach_pages(obj);
1220 if (omap_obj->flags & OMAP_BO_MEM_DMA_API) {
1221 dma_free_wc(dev->dev, obj->size, omap_obj->vaddr,
1222 omap_obj->dma_addr);
1223 } else if (omap_obj->vaddr) {
1224 vunmap(omap_obj->vaddr);
1225 } else if (obj->import_attach) {
1226 drm_prime_gem_destroy(obj, omap_obj->sgt);
1229 mutex_unlock(&omap_obj->lock);
1231 drm_gem_object_release(obj);
1233 mutex_destroy(&omap_obj->lock);
1238 static bool omap_gem_validate_flags(struct drm_device *dev, u32 flags)
1240 struct omap_drm_private *priv = dev->dev_private;
1242 switch (flags & OMAP_BO_CACHE_MASK) {
1243 case OMAP_BO_CACHED:
1245 case OMAP_BO_CACHE_MASK:
1252 if (flags & OMAP_BO_TILED_MASK) {
1253 if (!priv->usergart)
1256 switch (flags & OMAP_BO_TILED_MASK) {
1257 case OMAP_BO_TILED_8:
1258 case OMAP_BO_TILED_16:
1259 case OMAP_BO_TILED_32:
1270 static const struct vm_operations_struct omap_gem_vm_ops = {
1271 .fault = omap_gem_fault,
1272 .open = drm_gem_vm_open,
1273 .close = drm_gem_vm_close,
1276 static const struct drm_gem_object_funcs omap_gem_object_funcs = {
1277 .free = omap_gem_free_object,
1278 .export = omap_gem_prime_export,
1279 .mmap = omap_gem_object_mmap,
1280 .vm_ops = &omap_gem_vm_ops,
1283 /* GEM buffer object constructor */
1284 struct drm_gem_object *omap_gem_new(struct drm_device *dev,
1285 union omap_gem_size gsize, u32 flags)
1287 struct omap_drm_private *priv = dev->dev_private;
1288 struct omap_gem_object *omap_obj;
1289 struct drm_gem_object *obj;
1290 struct address_space *mapping;
1294 if (!omap_gem_validate_flags(dev, flags))
1297 /* Validate the flags and compute the memory and cache flags. */
1298 if (flags & OMAP_BO_TILED_MASK) {
1300 * Tiled buffers are always shmem paged backed. When they are
1301 * scanned out, they are remapped into DMM/TILER.
1303 flags |= OMAP_BO_MEM_SHMEM;
1306 * Currently don't allow cached buffers. There is some caching
1307 * stuff that needs to be handled better.
1309 flags &= ~(OMAP_BO_CACHED|OMAP_BO_WC|OMAP_BO_UNCACHED);
1310 flags |= tiler_get_cpu_cache_flags();
1311 } else if ((flags & OMAP_BO_SCANOUT) && !priv->has_dmm) {
1313 * If we don't have DMM, we must allocate scanout buffers
1314 * from contiguous DMA memory.
1316 flags |= OMAP_BO_MEM_DMA_API;
1317 } else if (!(flags & OMAP_BO_MEM_DMABUF)) {
1319 * All other buffers not backed by dma_buf are shmem-backed.
1321 flags |= OMAP_BO_MEM_SHMEM;
1324 /* Allocate and initialize the OMAP GEM object. */
1325 omap_obj = kzalloc(sizeof(*omap_obj), GFP_KERNEL);
1329 obj = &omap_obj->base;
1330 omap_obj->flags = flags;
1331 mutex_init(&omap_obj->lock);
1333 if (flags & OMAP_BO_TILED_MASK) {
1335 * For tiled buffers align dimensions to slot boundaries and
1336 * calculate size based on aligned dimensions.
1338 tiler_align(gem2fmt(flags), &gsize.tiled.width,
1339 &gsize.tiled.height);
1341 size = tiler_size(gem2fmt(flags), gsize.tiled.width,
1342 gsize.tiled.height);
1344 omap_obj->width = gsize.tiled.width;
1345 omap_obj->height = gsize.tiled.height;
1347 size = PAGE_ALIGN(gsize.bytes);
1350 obj->funcs = &omap_gem_object_funcs;
1352 /* Initialize the GEM object. */
1353 if (!(flags & OMAP_BO_MEM_SHMEM)) {
1354 drm_gem_private_object_init(dev, obj, size);
1356 ret = drm_gem_object_init(dev, obj, size);
1360 mapping = obj->filp->f_mapping;
1361 mapping_set_gfp_mask(mapping, GFP_USER | __GFP_DMA32);
1364 /* Allocate memory if needed. */
1365 if (flags & OMAP_BO_MEM_DMA_API) {
1366 omap_obj->vaddr = dma_alloc_wc(dev->dev, size,
1367 &omap_obj->dma_addr,
1369 if (!omap_obj->vaddr)
1373 mutex_lock(&priv->list_lock);
1374 list_add(&omap_obj->mm_list, &priv->obj_list);
1375 mutex_unlock(&priv->list_lock);
1380 drm_gem_object_release(obj);
1386 struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size,
1387 struct sg_table *sgt)
1389 struct omap_drm_private *priv = dev->dev_private;
1390 struct omap_gem_object *omap_obj;
1391 struct drm_gem_object *obj;
1392 union omap_gem_size gsize;
1394 /* Without a DMM only physically contiguous buffers can be supported. */
1395 if (!omap_gem_sgt_is_contiguous(sgt, size) && !priv->has_dmm)
1396 return ERR_PTR(-EINVAL);
1398 gsize.bytes = PAGE_ALIGN(size);
1399 obj = omap_gem_new(dev, gsize, OMAP_BO_MEM_DMABUF | OMAP_BO_WC);
1401 return ERR_PTR(-ENOMEM);
1403 omap_obj = to_omap_bo(obj);
1405 omap_obj->sgt = sgt;
1407 if (omap_gem_sgt_is_contiguous(sgt, size)) {
1408 omap_obj->dma_addr = sg_dma_address(sgt->sgl);
1410 /* Create pages list from sgt */
1411 struct page **pages;
1412 unsigned int npages;
1415 npages = DIV_ROUND_UP(size, PAGE_SIZE);
1416 pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
1418 omap_gem_free_object(obj);
1419 return ERR_PTR(-ENOMEM);
1422 omap_obj->pages = pages;
1423 ret = drm_prime_sg_to_page_array(sgt, pages, npages);
1425 omap_gem_free_object(obj);
1426 return ERR_PTR(-ENOMEM);
1433 /* convenience method to construct a GEM buffer object, and userspace handle */
1434 int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
1435 union omap_gem_size gsize, u32 flags, u32 *handle)
1437 struct drm_gem_object *obj;
1440 obj = omap_gem_new(dev, gsize, flags);
1444 ret = drm_gem_handle_create(file, obj, handle);
1446 omap_gem_free_object(obj);
1450 /* drop reference from allocate - handle holds it now */
1451 drm_gem_object_put(obj);
1456 /* -----------------------------------------------------------------------------
1460 /* If DMM is available, set up the usergart regions used for userspace mmap of tiled buffers. */
1461 void omap_gem_init(struct drm_device *dev)
1463 struct omap_drm_private *priv = dev->dev_private;
1464 struct omap_drm_usergart *usergart;
1465 const enum tiler_fmt fmts[] = {
1466 TILFMT_8BIT, TILFMT_16BIT, TILFMT_32BIT
1470 if (!dmm_is_available()) {
1471 /* DMM only supported on OMAP4 and later, so this isn't fatal */
1472 dev_warn(dev->dev, "DMM not available, disabling DMM support\n");
1476 usergart = kcalloc(ARRAY_SIZE(fmts), sizeof(*usergart), GFP_KERNEL);
1480 /* reserve 4k aligned/wide regions for userspace mappings: */
1481 for (i = 0; i < ARRAY_SIZE(fmts); i++) {
1482 u16 h = 1, w = PAGE_SIZE >> i;
1484 tiler_align(fmts[i], &w, &h);
1485 /* note: since each region is one 4kb page wide and has the minimum
1486 * number of rows, the height ends up being the same as the
1487 * number of pages in the region
1489 usergart[i].height = h;
1490 usergart[i].height_shift = ilog2(h);
1491 usergart[i].stride_pfn = tiler_stride(fmts[i], 0) >> PAGE_SHIFT;
1492 usergart[i].slot_shift = ilog2((PAGE_SIZE / h) >> i);
1493 for (j = 0; j < NUM_USERGART_ENTRIES; j++) {
1494 struct omap_drm_usergart_entry *entry;
1495 struct tiler_block *block;
1497 entry = &usergart[i].entry[j];
1498 block = tiler_reserve_2d(fmts[i], w, h, PAGE_SIZE);
1499 if (IS_ERR(block)) {
1501 "reserve failed: %d, %d, %ld\n",
1502 i, j, PTR_ERR(block));
1505 entry->dma_addr = tiler_ssptr(block);
1506 entry->block = block;
1508 DBG("%d:%d: %dx%d: dma_addr=%pad stride=%d", i, j, w, h,
1510 usergart[i].stride_pfn << PAGE_SHIFT);
1514 priv->usergart = usergart;
1515 priv->has_dmm = true;
1518 void omap_gem_deinit(struct drm_device *dev)
1520 struct omap_drm_private *priv = dev->dev_private;
1522 /* I believe we can rely on there being no more outstanding GEM
1523 * objects which could depend on usergart/dmm at this point.
1525 kfree(priv->usergart);