2 * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
18 #include <linux/seq_file.h>
19 #include <linux/shmem_fs.h>
20 #include <linux/spinlock.h>
21 #include <linux/pfn_t.h>
23 #include <drm/drm_vma_manager.h>
26 #include "omap_dmm_tiler.h"
29 * GEM buffer object implementation.
32 /* note: we use upper 8 bits of flags for driver-internal flags: */
33 #define OMAP_BO_MEM_DMA_API 0x01000000 /* memory allocated with the dma_alloc_* API */
34 #define OMAP_BO_MEM_SHMEM 0x02000000 /* memory allocated through shmem backing */
35 #define OMAP_BO_MEM_DMABUF 0x08000000 /* memory imported from a dmabuf */
37 struct omap_gem_object {
38 struct drm_gem_object base;
40 struct list_head mm_list;
44 /** width/height for tiled formats (rounded up to slot boundaries) */
47 /** roll applied when mapping to DMM */
51 * dma_addr contains the buffer DMA address. It is valid for
53 * - buffers allocated through the DMA mapping API (with the
54 * OMAP_BO_MEM_DMA_API flag set)
56 * - buffers imported from dmabuf (with the OMAP_BO_MEM_DMABUF flag set)
57 * if they are physically contiguous (when sgt->orig_nents == 1)
59 * - buffers mapped through the TILER when dma_addr_cnt is not zero, in
60 * which case the DMA address points to the TILER aperture
62 * Physically contiguous buffers have their DMA address equal to the
63 * physical address as we don't remap those buffers through the TILER.
65 * Buffers mapped to the TILER have their DMA address pointing to the
66 * TILER aperture. As TILER mappings are refcounted (through
67 * dma_addr_cnt) the DMA address must be accessed through omap_gem_pin()
68 * to ensure that the mapping won't disappear unexpectedly. References
69 * must be released with omap_gem_unpin().
74 * # of users of dma_addr
 * If the buffer has been imported from a dmabuf the OMAP_BO_MEM_DMABUF flag
80 * is set and the sgt field is valid.
85 * tiler block used when buffer is remapped in DMM/TILER.
87 struct tiler_block *block;
90 * Array of backing pages, if allocated. Note that pages are never
91 * allocated for buffers originally allocated from contiguous memory
95 /** addresses corresponding to pages in above array */
96 dma_addr_t *dma_addrs;
99 * Virtual address, if mapped.
104 #define to_omap_bo(x) container_of(x, struct omap_gem_object, base)
106 /* To deal with userspace mmap'ings of 2d tiled buffers, which (a) are
107 * not necessarily pinned in TILER all the time, and (b) when they are
108 * they are not necessarily page aligned, we reserve one or more small
109 * regions in each of the 2d containers to use as a user-GART where we
110 * can create a second page-aligned mapping of parts of the buffer
111 * being accessed from userspace.
113 * Note that we could optimize slightly when we know that multiple
 * tiler containers are backed by the same PAT.. but I'll leave that for later..
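 *
 * Concretely (as implemented below): when userspace touches a page of a 2d
 * tiled buffer that is not currently pinned, fault_2d() pins just a
 * slot-height slab of the buffer into one of these reserved blocks and
 * points the userspace PTEs at the TILER aperture rather than at the
 * backing pages.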
117 #define NUM_USERGART_ENTRIES 2
118 struct omap_drm_usergart_entry {
119 struct tiler_block *block; /* the reserved tiler block */
121 struct drm_gem_object *obj; /* the current pinned obj */
pgoff_t obj_pgoff; /* page offset of obj currently mapped in */
126 struct omap_drm_usergart {
127 struct omap_drm_usergart_entry entry[NUM_USERGART_ENTRIES];
128 int height; /* height in rows */
129 int height_shift; /* ilog2(height in rows) */
130 int slot_shift; /* ilog2(width per slot) */
131 int stride_pfn; /* stride in pages */
132 int last; /* index of last used entry */
135 /* -----------------------------------------------------------------------------
139 /** get mmap offset */
140 static u64 mmap_offset(struct drm_gem_object *obj)
142 struct drm_device *dev = obj->dev;
146 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
148 /* Make it mmapable */
149 size = omap_gem_mmap_size(obj);
150 ret = drm_gem_create_mmap_offset_size(obj, size);
152 dev_err(dev->dev, "could not allocate mmap offset\n");
156 return drm_vma_node_offset_addr(&obj->vma_node);
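
/* A buffer is physically contiguous if it was allocated through the DMA
 * mapping API, or if it was imported from a dmabuf whose scatter-gather
 * table has a single entry.
 */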
159 static bool is_contiguous(struct omap_gem_object *omap_obj)
161 if (omap_obj->flags & OMAP_BO_MEM_DMA_API)
164 if ((omap_obj->flags & OMAP_BO_MEM_DMABUF) && omap_obj->sgt->nents == 1)
170 /* -----------------------------------------------------------------------------
174 static void evict_entry(struct drm_gem_object *obj,
175 enum tiler_fmt fmt, struct omap_drm_usergart_entry *entry)
177 struct omap_gem_object *omap_obj = to_omap_bo(obj);
178 struct omap_drm_private *priv = obj->dev->dev_private;
179 int n = priv->usergart[fmt].height;
180 size_t size = PAGE_SIZE * n;
181 loff_t off = mmap_offset(obj) +
182 (entry->obj_pgoff << PAGE_SHIFT);
183 const int m = DIV_ROUND_UP(omap_obj->width << fmt, PAGE_SIZE);
/* if the stride is larger than PAGE_SIZE then this is a sparse mapping: */
188 for (i = n; i > 0; i--) {
189 unmap_mapping_range(obj->dev->anon_inode->i_mapping,
191 off += PAGE_SIZE * m;
194 unmap_mapping_range(obj->dev->anon_inode->i_mapping,
201 /* Evict a buffer from usergart, if it is mapped there */
202 static void evict(struct drm_gem_object *obj)
204 struct omap_gem_object *omap_obj = to_omap_bo(obj);
205 struct omap_drm_private *priv = obj->dev->dev_private;
207 if (omap_obj->flags & OMAP_BO_TILED) {
208 enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
211 for (i = 0; i < NUM_USERGART_ENTRIES; i++) {
212 struct omap_drm_usergart_entry *entry =
213 &priv->usergart[fmt].entry[i];
215 if (entry->obj == obj)
216 evict_entry(obj, fmt, entry);
221 /* -----------------------------------------------------------------------------
225 /** ensure backing pages are allocated */
226 static int omap_gem_attach_pages(struct drm_gem_object *obj)
228 struct drm_device *dev = obj->dev;
229 struct omap_gem_object *omap_obj = to_omap_bo(obj);
231 int npages = obj->size >> PAGE_SHIFT;
235 WARN_ON(omap_obj->pages);
237 pages = drm_gem_get_pages(obj);
239 dev_err(obj->dev->dev, "could not get pages: %ld\n", PTR_ERR(pages));
240 return PTR_ERR(pages);
243 /* for non-cached buffers, ensure the new pages are clean because
244 * DSS, GPU, etc. are not cache coherent:
246 if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
addrs = kmalloc_array(npages, sizeof(*addrs), GFP_KERNEL);
253 for (i = 0; i < npages; i++) {
254 addrs[i] = dma_map_page(dev->dev, pages[i],
255 0, PAGE_SIZE, DMA_TO_DEVICE);
257 if (dma_mapping_error(dev->dev, addrs[i])) {
259 "%s: failed to map page\n", __func__);
261 for (i = i - 1; i >= 0; --i) {
262 dma_unmap_page(dev->dev, addrs[i],
263 PAGE_SIZE, DMA_TO_DEVICE);
addrs = kcalloc(npages, sizeof(*addrs), GFP_KERNEL);
278 omap_obj->dma_addrs = addrs;
279 omap_obj->pages = pages;
286 drm_gem_put_pages(obj, pages, true, false);
/* acquire pages when needed (for example, for DMA where a physically
 * contiguous buffer is not required)
294 static int get_pages(struct drm_gem_object *obj, struct page ***pages)
296 struct omap_gem_object *omap_obj = to_omap_bo(obj);
299 if ((omap_obj->flags & OMAP_BO_MEM_SHMEM) && !omap_obj->pages) {
300 ret = omap_gem_attach_pages(obj);
302 dev_err(obj->dev->dev, "could not attach pages\n");
307 /* TODO: even phys-contig.. we should have a list of pages? */
308 *pages = omap_obj->pages;
313 /** release backing pages */
314 static void omap_gem_detach_pages(struct drm_gem_object *obj)
316 struct omap_gem_object *omap_obj = to_omap_bo(obj);
317 unsigned int npages = obj->size >> PAGE_SHIFT;
320 for (i = 0; i < npages; i++) {
321 if (omap_obj->dma_addrs[i])
322 dma_unmap_page(obj->dev->dev, omap_obj->dma_addrs[i],
323 PAGE_SIZE, DMA_TO_DEVICE);
326 kfree(omap_obj->dma_addrs);
327 omap_obj->dma_addrs = NULL;
329 drm_gem_put_pages(obj, omap_obj->pages, true, false);
330 omap_obj->pages = NULL;
333 /* get buffer flags */
334 u32 omap_gem_flags(struct drm_gem_object *obj)
336 return to_omap_bo(obj)->flags;
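
/* Get (creating it if necessary) the fake mmap offset for the object.
 * Unlike mmap_offset() above, this takes care of locking struct_mutex.
 */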
339 u64 omap_gem_mmap_offset(struct drm_gem_object *obj)
343 mutex_lock(&obj->dev->struct_mutex);
344 offset = mmap_offset(obj);
345 mutex_unlock(&obj->dev->struct_mutex);
350 size_t omap_gem_mmap_size(struct drm_gem_object *obj)
352 struct omap_gem_object *omap_obj = to_omap_bo(obj);
353 size_t size = obj->size;
355 if (omap_obj->flags & OMAP_BO_TILED) {
356 /* for tiled buffers, the virtual size has stride rounded up
357 * to 4kb.. (to hide the fact that row n+1 might start 16kb or
358 * 32kb later!). But we don't back the entire buffer with
 * pages, only the valid picture part.. so we need to adjust for
360 * this in the size used to mmap and generate mmap offset
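 *
 * (For example, a 16bpp buffer 480 pixels wide has 960-byte rows, but in
 * the mmap'd virtual view each row starts on its own 4kb boundary.)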
362 size = tiler_vsize(gem2fmt(omap_obj->flags),
363 omap_obj->width, omap_obj->height);
369 /* -----------------------------------------------------------------------------
373 /* Normal handling for the case of faulting in non-tiled buffers */
374 static int fault_1d(struct drm_gem_object *obj,
375 struct vm_area_struct *vma, struct vm_fault *vmf)
377 struct omap_gem_object *omap_obj = to_omap_bo(obj);
381 /* We don't use vmf->pgoff since that has the fake offset: */
382 pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
384 if (omap_obj->pages) {
385 omap_gem_cpu_sync_page(obj, pgoff);
386 pfn = page_to_pfn(omap_obj->pages[pgoff]);
388 BUG_ON(!is_contiguous(omap_obj));
389 pfn = (omap_obj->dma_addr >> PAGE_SHIFT) + pgoff;
392 VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
393 pfn, pfn << PAGE_SHIFT);
395 return vm_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
398 /* Special handling for the case of faulting in 2d tiled buffers */
399 static int fault_2d(struct drm_gem_object *obj,
400 struct vm_area_struct *vma, struct vm_fault *vmf)
402 struct omap_gem_object *omap_obj = to_omap_bo(obj);
403 struct omap_drm_private *priv = obj->dev->dev_private;
404 struct omap_drm_usergart_entry *entry;
405 enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
406 struct page *pages[64]; /* XXX is this too much to have on stack? */
408 pgoff_t pgoff, base_pgoff;
413 * Note the height of the slot is also equal to the number of pages
 * that need to be mapped in to fill a 4kb-wide CPU page. If the slot
415 * height is 64, then 64 pages fill a 4kb wide by 64 row region.
417 const int n = priv->usergart[fmt].height;
418 const int n_shift = priv->usergart[fmt].height_shift;
421 * If buffer width in bytes > PAGE_SIZE then the virtual stride is
 * rounded up to the next multiple of PAGE_SIZE.. this needs to be taken
423 * into account in some of the math, so figure out virtual stride
426 const int m = DIV_ROUND_UP(omap_obj->width << fmt, PAGE_SIZE);
428 /* We don't use vmf->pgoff since that has the fake offset: */
429 pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
432 * Actual address we start mapping at is rounded down to previous slot
433 * boundary in the y direction:
435 base_pgoff = round_down(pgoff, m << n_shift);
437 /* figure out buffer width in slots */
438 slots = omap_obj->width >> priv->usergart[fmt].slot_shift;
440 vaddr = vmf->address - ((pgoff - base_pgoff) << PAGE_SHIFT);
442 entry = &priv->usergart[fmt].entry[priv->usergart[fmt].last];
444 /* evict previous buffer using this usergart entry, if any: */
446 evict_entry(entry->obj, fmt, entry);
449 entry->obj_pgoff = base_pgoff;
451 /* now convert base_pgoff to phys offset from virt offset: */
452 base_pgoff = (base_pgoff >> n_shift) * slots;
/* for wider-than-4k buffers.. figure out which part of the slot-row we want: */
457 entry->obj_pgoff += off;
459 slots = min(slots - (off << n_shift), n);
460 base_pgoff += off << n_shift;
461 vaddr += off << PAGE_SHIFT;
465 * Map in pages. Beyond the valid pixel part of the buffer, we set
466 * pages[i] to NULL to get a dummy page mapped in.. if someone
467 * reads/writes it they will get random/undefined content, but at
468 * least it won't be corrupting whatever other random page used to
 * be mapped in, or causing other undefined behavior.
471 memcpy(pages, &omap_obj->pages[base_pgoff],
472 sizeof(struct page *) * slots);
473 memset(pages + slots, 0,
474 sizeof(struct page *) * (n - slots));
476 ret = tiler_pin(entry->block, pages, ARRAY_SIZE(pages), 0, true);
478 dev_err(obj->dev->dev, "failed to pin: %d\n", ret);
482 pfn = entry->dma_addr >> PAGE_SHIFT;
484 VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
485 pfn, pfn << PAGE_SHIFT);
487 for (i = n; i > 0; i--) {
488 vm_insert_mixed(vma, vaddr, __pfn_to_pfn_t(pfn, PFN_DEV));
489 pfn += priv->usergart[fmt].stride_pfn;
490 vaddr += PAGE_SIZE * m;
493 /* simple round-robin: */
494 priv->usergart[fmt].last = (priv->usergart[fmt].last + 1)
495 % NUM_USERGART_ENTRIES;
501 * omap_gem_fault - pagefault handler for GEM objects
504 * Invoked when a fault occurs on an mmap of a GEM managed area. GEM
505 * does most of the work for us including the actual map/unmap calls
506 * but we need to do the actual page work.
508 * The VMA was set up by GEM. In doing so it also ensured that the
 * vma->vm_private_data points to the GEM object that is backing this mapping.
512 int omap_gem_fault(struct vm_fault *vmf)
514 struct vm_area_struct *vma = vmf->vma;
515 struct drm_gem_object *obj = vma->vm_private_data;
516 struct omap_gem_object *omap_obj = to_omap_bo(obj);
517 struct drm_device *dev = obj->dev;
521 /* Make sure we don't parallel update on a fault, nor move or remove
522 * something from beneath our feet
524 mutex_lock(&dev->struct_mutex);
526 /* if a shmem backed object, make sure we have pages attached now */
527 ret = get_pages(obj, &pages);
531 /* where should we do corresponding put_pages().. we are mapping
532 * the original page, rather than thru a GART, so we can't rely
 * on eviction to trigger this. But munmap() of all mappings should
534 * probably trigger put_pages()?
537 if (omap_obj->flags & OMAP_BO_TILED)
538 ret = fault_2d(obj, vma, vmf);
540 ret = fault_1d(obj, vma, vmf);
544 mutex_unlock(&dev->struct_mutex);
551 * EBUSY is ok: this just means that another thread
552 * already did the job.
554 return VM_FAULT_NOPAGE;
558 return VM_FAULT_SIGBUS;
562 /** We override mainly to fix up some of the vm mapping flags.. */
563 int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma)
567 ret = drm_gem_mmap(filp, vma);
569 DBG("mmap failed: %d", ret);
573 return omap_gem_mmap_obj(vma->vm_private_data, vma);
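
/* Set up the vm flags and page protection for a GEM mapping. This is split
 * out from omap_gem_mmap() so that other paths (e.g. the dmabuf mmap code)
 * can reuse it on an already-created vma.
 */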
576 int omap_gem_mmap_obj(struct drm_gem_object *obj,
577 struct vm_area_struct *vma)
579 struct omap_gem_object *omap_obj = to_omap_bo(obj);
581 vma->vm_flags &= ~VM_PFNMAP;
582 vma->vm_flags |= VM_MIXEDMAP;
584 if (omap_obj->flags & OMAP_BO_WC) {
585 vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
586 } else if (omap_obj->flags & OMAP_BO_UNCACHED) {
587 vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
590 * We do have some private objects, at least for scanout buffers
 * on hardware without DMM/TILER. But these are allocated write-combined.
594 if (WARN_ON(!obj->filp))
598 * Shunt off cached objs to shmem file so they have their own
599 * address_space (so unmap_mapping_range does what we want,
600 * in particular in the case of mmap'd dmabufs)
604 vma->vm_file = get_file(obj->filp);
606 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
612 /* -----------------------------------------------------------------------------
617 * omap_gem_dumb_create - create a dumb buffer
 * @file: our client file
620 * @args: the requested arguments copied from userspace
622 * Allocate a buffer suitable for use for a frame buffer of the
 * form described by user space. Give userspace a handle by which it can
 * access the buffer.
626 int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
627 struct drm_mode_create_dumb *args)
629 union omap_gem_size gsize;
631 args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
633 args->size = PAGE_ALIGN(args->pitch * args->height);
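
/*
 * For example, a 1920x1080 request at 32 bpp works out to
 * pitch = DIV_ROUND_UP(1920 * 32, 8) = 7680 bytes and
 * size = PAGE_ALIGN(7680 * 1080) = 8294400 bytes (2025 pages).
 */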
635 gsize = (union omap_gem_size){
639 return omap_gem_new_handle(dev, file, gsize,
640 OMAP_BO_SCANOUT | OMAP_BO_WC, &args->handle);
 * omap_gem_dumb_map_offset - buffer mapping for dumb interface
645 * @file: our drm client file
647 * @handle: GEM handle to the object (from dumb_create)
649 * Do the necessary setup to allow the mapping of the frame buffer
650 * into user memory. We don't have to do much here at the moment.
652 int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
653 u32 handle, u64 *offset)
655 struct drm_gem_object *obj;
658 /* GEM does all our handle to object mapping */
659 obj = drm_gem_object_lookup(file, handle);
665 *offset = omap_gem_mmap_offset(obj);
667 drm_gem_object_unreference_unlocked(obj);
673 #ifdef CONFIG_DRM_FBDEV_EMULATION
674 /* Set scrolling position. This allows us to implement fast scrolling
677 * Call only from non-atomic contexts.
679 int omap_gem_roll(struct drm_gem_object *obj, u32 roll)
681 struct omap_gem_object *omap_obj = to_omap_bo(obj);
682 u32 npages = obj->size >> PAGE_SHIFT;
686 dev_err(obj->dev->dev, "invalid roll: %d\n", roll);
690 omap_obj->roll = roll;
692 mutex_lock(&obj->dev->struct_mutex);
694 /* if we aren't mapped yet, we don't need to do anything */
695 if (omap_obj->block) {
697 ret = get_pages(obj, &pages);
700 ret = tiler_pin(omap_obj->block, pages, npages, roll, true);
702 dev_err(obj->dev->dev, "could not repin: %d\n", ret);
706 mutex_unlock(&obj->dev->struct_mutex);
712 /* -----------------------------------------------------------------------------
713 * Memory Management & DMA Sync
717 * shmem buffers that are mapped cached are not coherent.
719 * We keep track of dirty pages using page faulting to perform cache management.
720 * When a page is mapped to the CPU in read/write mode the device can't access
721 * it and omap_obj->dma_addrs[i] is NULL. When a page is mapped to the device
 * omap_obj->dma_addrs[i] is set to the DMA address, and the page is
723 * unmapped from the CPU.
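 *
 * In other words each page is in one of two states:
 *   dma_addrs[i] == 0     the CPU owns the page, the device must not use it
 *   dma_addrs[i] != 0     the device owns it and the CPU mapping is torn down
 * omap_gem_cpu_sync_page() below moves a page back to CPU ownership, and
 * omap_gem_dma_sync_buffer() hands the whole buffer over to the device.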
725 static inline bool is_cached_coherent(struct drm_gem_object *obj)
727 struct omap_gem_object *omap_obj = to_omap_bo(obj);
729 return !((omap_obj->flags & OMAP_BO_MEM_SHMEM) &&
730 ((omap_obj->flags & OMAP_BO_CACHE_MASK) == OMAP_BO_CACHED));
733 /* Sync the buffer for CPU access.. note pages should already be
 * attached, i.e. via omap_gem_get_pages()
736 void omap_gem_cpu_sync_page(struct drm_gem_object *obj, int pgoff)
738 struct drm_device *dev = obj->dev;
739 struct omap_gem_object *omap_obj = to_omap_bo(obj);
741 if (is_cached_coherent(obj))
744 if (omap_obj->dma_addrs[pgoff]) {
745 dma_unmap_page(dev->dev, omap_obj->dma_addrs[pgoff],
746 PAGE_SIZE, DMA_TO_DEVICE);
747 omap_obj->dma_addrs[pgoff] = 0;
751 /* sync the buffer for DMA access */
752 void omap_gem_dma_sync_buffer(struct drm_gem_object *obj,
753 enum dma_data_direction dir)
755 struct drm_device *dev = obj->dev;
756 struct omap_gem_object *omap_obj = to_omap_bo(obj);
757 int i, npages = obj->size >> PAGE_SHIFT;
758 struct page **pages = omap_obj->pages;
761 if (is_cached_coherent(obj))
764 for (i = 0; i < npages; i++) {
765 if (!omap_obj->dma_addrs[i]) {
768 addr = dma_map_page(dev->dev, pages[i], 0,
770 if (dma_mapping_error(dev->dev, addr)) {
771 dev_warn(dev->dev, "%s: failed to map page\n",
777 omap_obj->dma_addrs[i] = addr;
782 unmap_mapping_range(obj->filp->f_mapping, 0,
783 omap_gem_mmap_size(obj), 1);
788 * omap_gem_pin() - Pin a GEM object in memory
789 * @obj: the GEM object
790 * @dma_addr: the DMA address
792 * Pin the given GEM object in memory and fill the dma_addr pointer with the
793 * object's DMA address. If the buffer is not physically contiguous it will be
794 * remapped through the TILER to provide a contiguous view.
796 * Pins are reference-counted, calling this function multiple times is allowed
797 * as long the corresponding omap_gem_unpin() calls are balanced.
799 * Return 0 on success or a negative error code otherwise.
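 *
 * A minimal sketch of a caller (not taken from this driver):
 *
 *	dma_addr_t dma_addr;
 *	int ret = omap_gem_pin(obj, &dma_addr);
 *	if (ret)
 *		return ret;
 *	... point the hardware at dma_addr ...
 *	omap_gem_unpin(obj);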
801 int omap_gem_pin(struct drm_gem_object *obj, dma_addr_t *dma_addr)
803 struct omap_drm_private *priv = obj->dev->dev_private;
804 struct omap_gem_object *omap_obj = to_omap_bo(obj);
807 mutex_lock(&obj->dev->struct_mutex);
809 if (!is_contiguous(omap_obj) && priv->has_dmm) {
810 if (omap_obj->dma_addr_cnt == 0) {
812 u32 npages = obj->size >> PAGE_SHIFT;
813 enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
814 struct tiler_block *block;
816 BUG_ON(omap_obj->block);
818 ret = get_pages(obj, &pages);
822 if (omap_obj->flags & OMAP_BO_TILED) {
823 block = tiler_reserve_2d(fmt,
825 omap_obj->height, 0);
827 block = tiler_reserve_1d(obj->size);
831 ret = PTR_ERR(block);
832 dev_err(obj->dev->dev,
833 "could not remap: %d (%d)\n", ret, fmt);
837 /* TODO: enable async refill.. */
838 ret = tiler_pin(block, pages, npages,
839 omap_obj->roll, true);
841 tiler_release(block);
842 dev_err(obj->dev->dev,
843 "could not pin: %d\n", ret);
847 omap_obj->dma_addr = tiler_ssptr(block);
848 omap_obj->block = block;
850 DBG("got dma address: %pad", &omap_obj->dma_addr);
853 omap_obj->dma_addr_cnt++;
855 *dma_addr = omap_obj->dma_addr;
856 } else if (is_contiguous(omap_obj)) {
857 *dma_addr = omap_obj->dma_addr;
864 mutex_unlock(&obj->dev->struct_mutex);
870 * omap_gem_unpin() - Unpin a GEM object from memory
871 * @obj: the GEM object
873 * Unpin the given GEM object previously pinned with omap_gem_pin(). Pins are
 * reference-counted, the actual unpin will only be performed when the number
875 * of calls to this function matches the number of calls to omap_gem_pin().
877 void omap_gem_unpin(struct drm_gem_object *obj)
879 struct omap_gem_object *omap_obj = to_omap_bo(obj);
882 mutex_lock(&obj->dev->struct_mutex);
883 if (omap_obj->dma_addr_cnt > 0) {
884 omap_obj->dma_addr_cnt--;
885 if (omap_obj->dma_addr_cnt == 0) {
886 ret = tiler_unpin(omap_obj->block);
888 dev_err(obj->dev->dev,
889 "could not unpin pages: %d\n", ret);
891 ret = tiler_release(omap_obj->block);
893 dev_err(obj->dev->dev,
894 "could not release unmap: %d\n", ret);
896 omap_obj->dma_addr = 0;
897 omap_obj->block = NULL;
901 mutex_unlock(&obj->dev->struct_mutex);
904 /* Get rotated scanout address (only valid if already pinned), at the
905 * specified orientation and x,y offset from top-left corner of buffer
906 * (only valid for tiled 2d buffers)
908 int omap_gem_rotated_dma_addr(struct drm_gem_object *obj, u32 orient,
909 int x, int y, dma_addr_t *dma_addr)
911 struct omap_gem_object *omap_obj = to_omap_bo(obj);
914 mutex_lock(&obj->dev->struct_mutex);
915 if ((omap_obj->dma_addr_cnt > 0) && omap_obj->block &&
916 (omap_obj->flags & OMAP_BO_TILED)) {
917 *dma_addr = tiler_tsptr(omap_obj->block, orient, x, y);
920 mutex_unlock(&obj->dev->struct_mutex);
924 /* Get tiler stride for the buffer (only valid for 2d tiled buffers) */
925 int omap_gem_tiled_stride(struct drm_gem_object *obj, u32 orient)
927 struct omap_gem_object *omap_obj = to_omap_bo(obj);
929 if (omap_obj->flags & OMAP_BO_TILED)
930 ret = tiler_stride(gem2fmt(omap_obj->flags), orient);
934 /* if !remap, and we don't have pages backing, then fail, rather than
 * increasing the pin count (which we don't really do yet anyway,
936 * because we don't support swapping pages back out). And 'remap'
937 * might not be quite the right name, but I wanted to keep it working
938 * similarly to omap_gem_pin(). Note though that mutex is not
 * acquired if !remap (because this can be called in atomic ctxt),
940 * but probably omap_gem_unpin() should be changed to work in the
941 * same way. If !remap, a matching omap_gem_put_pages() call is not
942 * required (and should not be made).
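 *
 * In short (as described above): remap == false is safe in atomic context
 * but only succeeds if the pages are already attached, and must not be
 * paired with omap_gem_put_pages(); remap == true may allocate and attach
 * pages and should be balanced with omap_gem_put_pages().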
944 int omap_gem_get_pages(struct drm_gem_object *obj, struct page ***pages,
949 struct omap_gem_object *omap_obj = to_omap_bo(obj);
950 if (!omap_obj->pages)
952 *pages = omap_obj->pages;
955 mutex_lock(&obj->dev->struct_mutex);
956 ret = get_pages(obj, pages);
957 mutex_unlock(&obj->dev->struct_mutex);
961 /* release pages when DMA no longer being performed */
962 int omap_gem_put_pages(struct drm_gem_object *obj)
964 /* do something here if we dynamically attach/detach pages.. at
965 * least they would no longer need to be pinned if everyone has
966 * released the pages..
971 #ifdef CONFIG_DRM_FBDEV_EMULATION
972 /* Get kernel virtual address for CPU access.. this more or less only
 * exists for omap_fbdev. This should be called with struct_mutex held.
976 void *omap_gem_vaddr(struct drm_gem_object *obj)
978 struct omap_gem_object *omap_obj = to_omap_bo(obj);
979 WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
980 if (!omap_obj->vaddr) {
982 int ret = get_pages(obj, &pages);
985 omap_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
986 VM_MAP, pgprot_writecombine(PAGE_KERNEL));
988 return omap_obj->vaddr;
992 /* -----------------------------------------------------------------------------
997 /* re-pin objects in DMM in resume path: */
998 int omap_gem_resume(struct drm_device *dev)
1000 struct omap_drm_private *priv = dev->dev_private;
1001 struct omap_gem_object *omap_obj;
1004 list_for_each_entry(omap_obj, &priv->obj_list, mm_list) {
1005 if (omap_obj->block) {
1006 struct drm_gem_object *obj = &omap_obj->base;
1007 u32 npages = obj->size >> PAGE_SHIFT;
1009 WARN_ON(!omap_obj->pages); /* this can't happen */
1010 ret = tiler_pin(omap_obj->block,
1011 omap_obj->pages, npages,
1012 omap_obj->roll, true);
1014 dev_err(dev->dev, "could not repin: %d\n", ret);
1024 /* -----------------------------------------------------------------------------
1028 #ifdef CONFIG_DEBUG_FS
1029 void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
1031 struct omap_gem_object *omap_obj = to_omap_bo(obj);
1034 off = drm_vma_node_start(&obj->vma_node);
1036 seq_printf(m, "%08x: %2d (%2d) %08llx %pad (%2d) %p %4d",
1037 omap_obj->flags, obj->name, kref_read(&obj->refcount),
1038 off, &omap_obj->dma_addr, omap_obj->dma_addr_cnt,
1039 omap_obj->vaddr, omap_obj->roll);
1041 if (omap_obj->flags & OMAP_BO_TILED) {
1042 seq_printf(m, " %dx%d", omap_obj->width, omap_obj->height);
1043 if (omap_obj->block) {
1044 struct tcm_area *area = &omap_obj->block->area;
1045 seq_printf(m, " (%dx%d, %dx%d)",
1046 area->p0.x, area->p0.y,
1047 area->p1.x, area->p1.y);
1050 seq_printf(m, " %zu", obj->size);
1053 seq_printf(m, "\n");
1056 void omap_gem_describe_objects(struct list_head *list, struct seq_file *m)
1058 struct omap_gem_object *omap_obj;
1062 list_for_each_entry(omap_obj, list, mm_list) {
1063 struct drm_gem_object *obj = &omap_obj->base;
1065 omap_gem_describe(obj, m);
1070 seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
1074 /* -----------------------------------------------------------------------------
1075 * Constructor & Destructor
1078 void omap_gem_free_object(struct drm_gem_object *obj)
1080 struct drm_device *dev = obj->dev;
1081 struct omap_drm_private *priv = dev->dev_private;
1082 struct omap_gem_object *omap_obj = to_omap_bo(obj);
1086 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
1088 spin_lock(&priv->list_lock);
1089 list_del(&omap_obj->mm_list);
1090 spin_unlock(&priv->list_lock);
1092 /* this means the object is still pinned.. which really should
1093 * not happen. I think..
1095 WARN_ON(omap_obj->dma_addr_cnt > 0);
1097 if (omap_obj->pages) {
1098 if (omap_obj->flags & OMAP_BO_MEM_DMABUF)
1099 kfree(omap_obj->pages);
1101 omap_gem_detach_pages(obj);
1104 if (omap_obj->flags & OMAP_BO_MEM_DMA_API) {
1105 dma_free_wc(dev->dev, obj->size, omap_obj->vaddr,
1106 omap_obj->dma_addr);
1107 } else if (omap_obj->vaddr) {
1108 vunmap(omap_obj->vaddr);
1109 } else if (obj->import_attach) {
1110 drm_prime_gem_destroy(obj, omap_obj->sgt);
1113 drm_gem_object_release(obj);
1118 /* GEM buffer object constructor */
1119 struct drm_gem_object *omap_gem_new(struct drm_device *dev,
1120 union omap_gem_size gsize, u32 flags)
1122 struct omap_drm_private *priv = dev->dev_private;
1123 struct omap_gem_object *omap_obj;
1124 struct drm_gem_object *obj;
1125 struct address_space *mapping;
1129 /* Validate the flags and compute the memory and cache flags. */
1130 if (flags & OMAP_BO_TILED) {
1131 if (!priv->usergart) {
1132 dev_err(dev->dev, "Tiled buffers require DMM\n");
 * Tiled buffers are always backed by shmem pages. When they are
1138 * scanned out, they are remapped into DMM/TILER.
1140 flags &= ~OMAP_BO_SCANOUT;
1141 flags |= OMAP_BO_MEM_SHMEM;
1144 * Currently don't allow cached buffers. There is some caching
1145 * stuff that needs to be handled better.
1147 flags &= ~(OMAP_BO_CACHED|OMAP_BO_WC|OMAP_BO_UNCACHED);
1148 flags |= tiler_get_cpu_cache_flags();
1149 } else if ((flags & OMAP_BO_SCANOUT) && !priv->has_dmm) {
1151 * OMAP_BO_SCANOUT hints that the buffer doesn't need to be
1152 * tiled. However, to lower the pressure on memory allocation,
1153 * use contiguous memory only if no TILER is available.
1155 flags |= OMAP_BO_MEM_DMA_API;
1156 } else if (!(flags & OMAP_BO_MEM_DMABUF)) {
1158 * All other buffers not backed by dma_buf are shmem-backed.
1160 flags |= OMAP_BO_MEM_SHMEM;
/* Allocate and initialize the OMAP GEM object. */
1164 omap_obj = kzalloc(sizeof(*omap_obj), GFP_KERNEL);
1168 obj = &omap_obj->base;
1169 omap_obj->flags = flags;
1171 if (flags & OMAP_BO_TILED) {
1173 * For tiled buffers align dimensions to slot boundaries and
1174 * calculate size based on aligned dimensions.
1176 tiler_align(gem2fmt(flags), &gsize.tiled.width,
1177 &gsize.tiled.height);
1179 size = tiler_size(gem2fmt(flags), gsize.tiled.width,
1180 gsize.tiled.height);
1182 omap_obj->width = gsize.tiled.width;
1183 omap_obj->height = gsize.tiled.height;
1185 size = PAGE_ALIGN(gsize.bytes);
1188 /* Initialize the GEM object. */
1189 if (!(flags & OMAP_BO_MEM_SHMEM)) {
1190 drm_gem_private_object_init(dev, obj, size);
1192 ret = drm_gem_object_init(dev, obj, size);
1196 mapping = obj->filp->f_mapping;
1197 mapping_set_gfp_mask(mapping, GFP_USER | __GFP_DMA32);
1200 /* Allocate memory if needed. */
1201 if (flags & OMAP_BO_MEM_DMA_API) {
1202 omap_obj->vaddr = dma_alloc_wc(dev->dev, size,
1203 &omap_obj->dma_addr,
1205 if (!omap_obj->vaddr)
1209 spin_lock(&priv->list_lock);
1210 list_add(&omap_obj->mm_list, &priv->obj_list);
1211 spin_unlock(&priv->list_lock);
1216 drm_gem_object_release(obj);
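
/* Wrap an imported dmabuf scatterlist in a GEM object. A single-entry sgt
 * is treated as physically contiguous and its DMA address used directly;
 * otherwise a pages[] array is built from the sgt so that the buffer can
 * later be remapped through DMM/TILER.
 */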
1222 struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size,
1223 struct sg_table *sgt)
1225 struct omap_drm_private *priv = dev->dev_private;
1226 struct omap_gem_object *omap_obj;
1227 struct drm_gem_object *obj;
1228 union omap_gem_size gsize;
1230 /* Without a DMM only physically contiguous buffers can be supported. */
1231 if (sgt->orig_nents != 1 && !priv->has_dmm)
1232 return ERR_PTR(-EINVAL);
1234 mutex_lock(&dev->struct_mutex);
1236 gsize.bytes = PAGE_ALIGN(size);
1237 obj = omap_gem_new(dev, gsize, OMAP_BO_MEM_DMABUF | OMAP_BO_WC);
1239 obj = ERR_PTR(-ENOMEM);
1243 omap_obj = to_omap_bo(obj);
1244 omap_obj->sgt = sgt;
1246 if (sgt->orig_nents == 1) {
1247 omap_obj->dma_addr = sg_dma_address(sgt->sgl);
1249 /* Create pages list from sgt */
1250 struct sg_page_iter iter;
1251 struct page **pages;
1252 unsigned int npages;
1255 npages = DIV_ROUND_UP(size, PAGE_SIZE);
1256 pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
1258 omap_gem_free_object(obj);
1259 obj = ERR_PTR(-ENOMEM);
1263 omap_obj->pages = pages;
1265 for_each_sg_page(sgt->sgl, &iter, sgt->orig_nents, 0) {
1266 pages[i++] = sg_page_iter_page(&iter);
1271 if (WARN_ON(i != npages)) {
1272 omap_gem_free_object(obj);
1273 obj = ERR_PTR(-ENOMEM);
1279 mutex_unlock(&dev->struct_mutex);
1283 /* convenience method to construct a GEM buffer object, and userspace handle */
1284 int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
1285 union omap_gem_size gsize, u32 flags, u32 *handle)
1287 struct drm_gem_object *obj;
1290 obj = omap_gem_new(dev, gsize, flags);
1294 ret = drm_gem_handle_create(file, obj, handle);
1296 omap_gem_free_object(obj);
1300 /* drop reference from allocate - handle holds it now */
1301 drm_gem_object_unreference_unlocked(obj);
1306 /* -----------------------------------------------------------------------------
1310 /* If DMM is used, we need to set some stuff up.. */
1311 void omap_gem_init(struct drm_device *dev)
1313 struct omap_drm_private *priv = dev->dev_private;
1314 struct omap_drm_usergart *usergart;
1315 const enum tiler_fmt fmts[] = {
1316 TILFMT_8BIT, TILFMT_16BIT, TILFMT_32BIT
1320 if (!dmm_is_available()) {
1321 /* DMM only supported on OMAP4 and later, so this isn't fatal */
dev_warn(dev->dev, "DMM not available, disabling DMM support\n");
1326 usergart = kcalloc(3, sizeof(*usergart), GFP_KERNEL);
1330 /* reserve 4k aligned/wide regions for userspace mappings: */
1331 for (i = 0; i < ARRAY_SIZE(fmts); i++) {
1332 u16 h = 1, w = PAGE_SIZE >> i;
1334 tiler_align(fmts[i], &w, &h);
/* note: since each region is one 4kb page wide and uses the minimum
 * number of rows, the height ends up being the same as the
 * number of pages in the region
1339 usergart[i].height = h;
1340 usergart[i].height_shift = ilog2(h);
1341 usergart[i].stride_pfn = tiler_stride(fmts[i], 0) >> PAGE_SHIFT;
1342 usergart[i].slot_shift = ilog2((PAGE_SIZE / h) >> i);
1343 for (j = 0; j < NUM_USERGART_ENTRIES; j++) {
1344 struct omap_drm_usergart_entry *entry;
1345 struct tiler_block *block;
1347 entry = &usergart[i].entry[j];
1348 block = tiler_reserve_2d(fmts[i], w, h, PAGE_SIZE);
1349 if (IS_ERR(block)) {
1351 "reserve failed: %d, %d, %ld\n",
1352 i, j, PTR_ERR(block));
1355 entry->dma_addr = tiler_ssptr(block);
1356 entry->block = block;
1358 DBG("%d:%d: %dx%d: dma_addr=%pad stride=%d", i, j, w, h,
1360 usergart[i].stride_pfn << PAGE_SHIFT);
1364 priv->usergart = usergart;
1365 priv->has_dmm = true;
1368 void omap_gem_deinit(struct drm_device *dev)
1370 struct omap_drm_private *priv = dev->dev_private;
1372 /* I believe we can rely on there being no more outstanding GEM
1373 * objects which could depend on usergart/dmm at this point.
1375 kfree(priv->usergart);