// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2011 Texas Instruments Incorporated - https://www.ti.com/
 */

#include <linux/dma-mapping.h>
#include <linux/seq_file.h>
#include <linux/shmem_fs.h>
#include <linux/spinlock.h>
#include <linux/pfn_t.h>

#include <drm/drm_prime.h>
#include <drm/drm_vma_manager.h>

#include "omap_drv.h"
#include "omap_dmm_tiler.h"
/*
 * GEM buffer object implementation.
 */

/* note: we use upper 8 bits of flags for driver-internal flags: */
#define OMAP_BO_MEM_DMA_API	0x01000000	/* memory allocated with the dma_alloc_* API */
#define OMAP_BO_MEM_SHMEM	0x02000000	/* memory allocated through shmem backing */
#define OMAP_BO_MEM_DMABUF	0x08000000	/* memory imported from a dmabuf */
struct omap_gem_object {
	struct drm_gem_object base;

	struct list_head mm_list;

	u32 flags;

	/** width/height for tiled formats (rounded up to slot boundaries) */
	u16 width, height;

	/** roll applied when mapping to DMM */
	u32 roll;

	/** protects dma_addr_cnt, block, pages, dma_addrs and vaddr */
	struct mutex lock;

	/**
	 * dma_addr contains the buffer DMA address. It is valid for
	 *
	 * - buffers allocated through the DMA mapping API (with the
	 *   OMAP_BO_MEM_DMA_API flag set)
	 *
	 * - buffers imported from dmabuf (with the OMAP_BO_MEM_DMABUF flag set)
	 *   if they are physically contiguous (when sgt->orig_nents == 1)
	 *
	 * - buffers mapped through the TILER when dma_addr_cnt is not zero, in
	 *   which case the DMA address points to the TILER aperture
	 *
	 * Physically contiguous buffers have their DMA address equal to the
	 * physical address as we don't remap those buffers through the TILER.
	 *
	 * Buffers mapped to the TILER have their DMA address pointing to the
	 * TILER aperture. As TILER mappings are refcounted (through
	 * dma_addr_cnt) the DMA address must be accessed through omap_gem_pin()
	 * to ensure that the mapping won't disappear unexpectedly. References
	 * must be released with omap_gem_unpin().
	 */
	dma_addr_t dma_addr;

	/**
	 * # of users of dma_addr
	 */
	refcount_t dma_addr_cnt;

	/**
	 * If the buffer has been imported from a dmabuf the OMAP_BO_MEM_DMABUF
	 * flag is set and the sgt field is valid.
	 */
	struct sg_table *sgt;

	/**
	 * tiler block used when buffer is remapped in DMM/TILER.
	 */
	struct tiler_block *block;

	/**
	 * Array of backing pages, if allocated. Note that pages are never
	 * allocated for buffers originally allocated from contiguous memory.
	 */
	struct page **pages;

	/** addresses corresponding to pages in above array */
	dma_addr_t *dma_addrs;

	/**
	 * Virtual address, if mapped.
	 */
	void *vaddr;
};
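/* Get the omap-specific GEM object from the drm_gem_object embedded in it. */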
#define to_omap_bo(x) container_of(x, struct omap_gem_object, base)
/* To deal with userspace mmap'ings of 2d tiled buffers, which (a) are
 * not necessarily pinned in TILER all the time, and (b) when they are
 * they are not necessarily page aligned, we reserve one or more small
 * regions in each of the 2d containers to use as a user-GART where we
 * can create a second page-aligned mapping of parts of the buffer
 * being accessed from userspace.
 *
 * Note that we could optimize slightly when we know that multiple
 * tiler containers are backed by the same PAT, but I'll leave that
 * for later.
 */
#define NUM_USERGART_ENTRIES 2
struct omap_drm_usergart_entry {
	struct tiler_block *block;	/* the reserved tiler block */
	dma_addr_t dma_addr;
	struct drm_gem_object *obj;	/* the current pinned obj */
	pgoff_t obj_pgoff;		/* page offset of obj currently
					   mapped in */
};

struct omap_drm_usergart {
	struct omap_drm_usergart_entry entry[NUM_USERGART_ENTRIES];
	int height;		/* height in rows */
	int height_shift;	/* ilog2(height in rows) */
	int slot_shift;		/* ilog2(width per slot) */
	int stride_pfn;		/* stride in pages */
	int last;		/* index of last used entry */
};
/* -----------------------------------------------------------------------------
 * Helpers
 */

/** get mmap offset */
u64 omap_gem_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	int ret;
	size_t size;

	/* Make it mmapable */
	size = omap_gem_mmap_size(obj);
	ret = drm_gem_create_mmap_offset_size(obj, size);
	if (ret) {
		dev_err(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}
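/*
 * A buffer is considered physically contiguous if it was allocated through the
 * DMA mapping API, or if it was imported from a dmabuf whose scatterlist has a
 * single entry.
 */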
static bool omap_gem_is_contiguous(struct omap_gem_object *omap_obj)
{
	if (omap_obj->flags & OMAP_BO_MEM_DMA_API)
		return true;

	if ((omap_obj->flags & OMAP_BO_MEM_DMABUF) && omap_obj->sgt->nents == 1)
		return true;

	return false;
}
/* -----------------------------------------------------------------------------
 * Eviction
 */
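/*
 * Remove any userspace mapping of the object that goes through the given
 * usergart entry, so that the entry can be reused for another buffer.
 */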
static void omap_gem_evict_entry(struct drm_gem_object *obj,
		enum tiler_fmt fmt, struct omap_drm_usergart_entry *entry)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct omap_drm_private *priv = obj->dev->dev_private;
	int n = priv->usergart[fmt].height;
	size_t size = PAGE_SIZE * n;
	loff_t off = omap_gem_mmap_offset(obj) +
			(entry->obj_pgoff << PAGE_SHIFT);
	const int m = DIV_ROUND_UP(omap_obj->width << fmt, PAGE_SIZE);

	if (m > 1) {
		int i;

		/* if stride is greater than PAGE_SIZE then sparse mapping: */
		for (i = n; i > 0; i--) {
			unmap_mapping_range(obj->dev->anon_inode->i_mapping,
					    off, PAGE_SIZE, 1);
			off += PAGE_SIZE * m;
		}
	} else {
		unmap_mapping_range(obj->dev->anon_inode->i_mapping,
				    off, size, 1);
	}

	entry->obj = NULL;
}
/* Evict a buffer from usergart, if it is mapped there */
static void omap_gem_evict(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct omap_drm_private *priv = obj->dev->dev_private;

	if (omap_obj->flags & OMAP_BO_TILED_MASK) {
		enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
		int i;

		for (i = 0; i < NUM_USERGART_ENTRIES; i++) {
			struct omap_drm_usergart_entry *entry =
				&priv->usergart[fmt].entry[i];

			if (entry->obj == obj)
				omap_gem_evict_entry(obj, fmt, entry);
		}
	}
}
/* -----------------------------------------------------------------------------
 * Page Management
 */

/*
 * Ensure backing pages are allocated. Must be called with the omap_obj.lock
 * held.
 */
static int omap_gem_attach_pages(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct page **pages;
	int npages = obj->size >> PAGE_SHIFT;
	int i, ret;
	dma_addr_t *addrs;

	lockdep_assert_held(&omap_obj->lock);

	/*
	 * If not using shmem (in which case backing pages don't need to be
	 * allocated) or if pages are already allocated we're done.
	 */
	if (!(omap_obj->flags & OMAP_BO_MEM_SHMEM) || omap_obj->pages)
		return 0;

	pages = drm_gem_get_pages(obj);
	if (IS_ERR(pages)) {
		dev_err(obj->dev->dev, "could not get pages: %ld\n", PTR_ERR(pages));
		return PTR_ERR(pages);
	}

	/* for non-cached buffers, ensure the new pages are clean because
	 * DSS, GPU, etc. are not cache coherent:
	 */
	if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
		addrs = kmalloc_array(npages, sizeof(*addrs), GFP_KERNEL);
		if (!addrs) {
			ret = -ENOMEM;
			goto free_pages;
		}

		for (i = 0; i < npages; i++) {
			addrs[i] = dma_map_page(dev->dev, pages[i],
					0, PAGE_SIZE, DMA_TO_DEVICE);

			if (dma_mapping_error(dev->dev, addrs[i])) {
				dev_warn(dev->dev,
					"%s: failed to map page\n", __func__);

				for (i = i - 1; i >= 0; --i) {
					dma_unmap_page(dev->dev, addrs[i],
						PAGE_SIZE, DMA_TO_DEVICE);
				}

				ret = -ENOMEM;
				goto free_addrs;
			}
		}
	} else {
		addrs = kcalloc(npages, sizeof(*addrs), GFP_KERNEL);
		if (!addrs) {
			ret = -ENOMEM;
			goto free_pages;
		}
	}

	omap_obj->dma_addrs = addrs;
	omap_obj->pages = pages;

	return 0;

free_addrs:
	kfree(addrs);
free_pages:
	drm_gem_put_pages(obj, pages, true, false);

	return ret;
}
/* Release backing pages. Must be called with the omap_obj.lock held. */
static void omap_gem_detach_pages(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	unsigned int npages = obj->size >> PAGE_SHIFT;
	unsigned int i;

	lockdep_assert_held(&omap_obj->lock);

	for (i = 0; i < npages; i++) {
		if (omap_obj->dma_addrs[i])
			dma_unmap_page(obj->dev->dev, omap_obj->dma_addrs[i],
				       PAGE_SIZE, DMA_TO_DEVICE);
	}

	kfree(omap_obj->dma_addrs);
	omap_obj->dma_addrs = NULL;

	drm_gem_put_pages(obj, omap_obj->pages, true, false);
	omap_obj->pages = NULL;
}
/* get buffer flags */
u32 omap_gem_flags(struct drm_gem_object *obj)
{
	return to_omap_bo(obj)->flags;
}
size_t omap_gem_mmap_size(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	size_t size = obj->size;

	if (omap_obj->flags & OMAP_BO_TILED_MASK) {
		/* for tiled buffers, the virtual size has stride rounded up
		 * to 4kb.. (to hide the fact that row n+1 might start 16kb or
		 * 32kb later!). But we don't back the entire buffer with
		 * pages, only the valid picture part.. so need to adjust for
		 * this in the size used to mmap and generate mmap offset
		 */
		size = tiler_vsize(gem2fmt(omap_obj->flags),
				   omap_obj->width, omap_obj->height);
	}

	return size;
}

/* -----------------------------------------------------------------------------
 * Fault Handling
 */
/* Normal handling for the case of faulting in non-tiled buffers */
static vm_fault_t omap_gem_fault_1d(struct drm_gem_object *obj,
		struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	unsigned long pfn;
	pgoff_t pgoff;

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	if (omap_obj->pages) {
		omap_gem_cpu_sync_page(obj, pgoff);
		pfn = page_to_pfn(omap_obj->pages[pgoff]);
	} else {
		BUG_ON(!omap_gem_is_contiguous(omap_obj));
		pfn = (omap_obj->dma_addr >> PAGE_SHIFT) + pgoff;
	}

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
			pfn, pfn << PAGE_SHIFT);

	return vmf_insert_mixed(vma, vmf->address,
			__pfn_to_pfn_t(pfn, PFN_DEV));
}
/* Special handling for the case of faulting in 2d tiled buffers */
static vm_fault_t omap_gem_fault_2d(struct drm_gem_object *obj,
		struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct omap_drm_private *priv = obj->dev->dev_private;
	struct omap_drm_usergart_entry *entry;
	enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
	struct page *pages[64];	/* XXX is this too much to have on stack? */
	unsigned long pfn;
	pgoff_t pgoff, base_pgoff;
	unsigned long vaddr;
	int i, err, slots;
	vm_fault_t ret = VM_FAULT_NOPAGE;

	/*
	 * Note the height of the slot is also equal to the number of pages
	 * that need to be mapped in to fill a 4kb wide CPU page. If the slot
	 * height is 64, then 64 pages fill a 4kb wide by 64 row region.
	 */
	const int n = priv->usergart[fmt].height;
	const int n_shift = priv->usergart[fmt].height_shift;

	/*
	 * If buffer width in bytes > PAGE_SIZE then the virtual stride is
	 * rounded up to the next multiple of PAGE_SIZE. This needs to be
	 * taken into account in some of the math, so figure out the virtual
	 * stride in pages.
	 */
	const int m = DIV_ROUND_UP(omap_obj->width << fmt, PAGE_SIZE);

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	/*
	 * Actual address we start mapping at is rounded down to previous slot
	 * boundary in the y direction:
	 */
	base_pgoff = round_down(pgoff, m << n_shift);

	/* figure out buffer width in slots */
	slots = omap_obj->width >> priv->usergart[fmt].slot_shift;

	vaddr = vmf->address - ((pgoff - base_pgoff) << PAGE_SHIFT);

	entry = &priv->usergart[fmt].entry[priv->usergart[fmt].last];

	/* evict previous buffer using this usergart entry, if any: */
	if (entry->obj)
		omap_gem_evict_entry(entry->obj, fmt, entry);

	entry->obj = obj;
	entry->obj_pgoff = base_pgoff;

	/* now convert base_pgoff to phys offset from virt offset: */
	base_pgoff = (base_pgoff >> n_shift) * slots;

	/* for wider-than 4k.. figure out which part of the slot-row we want: */
	if (m > 1) {
		int off = pgoff % m;

		entry->obj_pgoff += off;
		base_pgoff /= m;
		slots = min(slots - (off << n_shift), n);
		base_pgoff += off << n_shift;
		vaddr += off << PAGE_SHIFT;
	}

	/*
	 * Map in pages. Beyond the valid pixel part of the buffer, we set
	 * pages[i] to NULL to get a dummy page mapped in.. if someone
	 * reads/writes it they will get random/undefined content, but at
	 * least it won't be corrupting whatever other random page used to
	 * be mapped in, or other undefined behavior.
	 */
	memcpy(pages, &omap_obj->pages[base_pgoff],
			sizeof(struct page *) * slots);
	memset(pages + slots, 0,
			sizeof(struct page *) * (n - slots));

	err = tiler_pin(entry->block, pages, ARRAY_SIZE(pages), 0, true);
	if (err) {
		ret = vmf_error(err);
		dev_err(obj->dev->dev, "failed to pin: %d\n", err);
		return ret;
	}

	pfn = entry->dma_addr >> PAGE_SHIFT;

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
			pfn, pfn << PAGE_SHIFT);

	for (i = n; i > 0; i--) {
		ret = vmf_insert_mixed(vma,
				vaddr, __pfn_to_pfn_t(pfn, PFN_DEV));
		if (ret & VM_FAULT_ERROR)
			break;
		pfn += priv->usergart[fmt].stride_pfn;
		vaddr += PAGE_SIZE * m;
	}

	/* simple round-robin: */
	priv->usergart[fmt].last = (priv->usergart[fmt].last + 1)
				 % NUM_USERGART_ENTRIES;

	return ret;
}
/**
 * omap_gem_fault - pagefault handler for GEM objects
 * @vmf: fault detail
 *
 * Invoked when a fault occurs on an mmap of a GEM managed area. GEM
 * does most of the work for us including the actual map/unmap calls
 * but we need to do the actual page work.
 *
 * The VMA was set up by GEM. In doing so it also ensured that the
 * vma->vm_private_data points to the GEM object that is backing this
 * mapping.
 */
static vm_fault_t omap_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int err;
	vm_fault_t ret;

	/* Make sure we don't parallel update on a fault, nor move or remove
	 * something from beneath our feet
	 */
	mutex_lock(&omap_obj->lock);

	/* if a shmem backed object, make sure we have pages attached now */
	err = omap_gem_attach_pages(obj);
	if (err) {
		ret = vmf_error(err);
		goto fail;
	}

	/* where should we do corresponding put_pages().. we are mapping
	 * the original page, rather than thru a GART, so we can't rely
	 * on eviction to trigger this. But munmap() or all mappings should
	 * probably trigger put_pages()?
	 */

	if (omap_obj->flags & OMAP_BO_TILED_MASK)
		ret = omap_gem_fault_2d(obj, vma, vmf);
	else
		ret = omap_gem_fault_1d(obj, vma, vmf);

fail:
	mutex_unlock(&omap_obj->lock);
	return ret;
}
/** We override mainly to fix up some of the vm mapping flags.. */
int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return omap_gem_mmap_obj(vma->vm_private_data, vma);
}
int omap_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (omap_obj->flags & OMAP_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (omap_obj->flags & OMAP_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * We do have some private objects, at least for scanout buffers
		 * on hardware without DMM/TILER. But these are allocated write-
		 * combined.
		 */
		if (WARN_ON(!obj->filp))
			return -EINVAL;

		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		vma->vm_pgoff = 0;
		vma->vm_file = get_file(obj->filp);

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}
/* -----------------------------------------------------------------------------
 * Dumb Buffers
 */

/**
 * omap_gem_dumb_create - create a dumb buffer
 * @file: our client file
 * @dev: our device
 * @args: the requested arguments copied from userspace
 *
 * Allocate a buffer suitable for use for a frame buffer of the
 * form described by user space. Give userspace a handle by which
 * to reference it.
 */
int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	union omap_gem_size gsize;

	args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);

	args->size = PAGE_ALIGN(args->pitch * args->height);

	gsize = (union omap_gem_size){
		.bytes = args->size,
	};

	return omap_gem_new_handle(dev, file, gsize,
			OMAP_BO_SCANOUT | OMAP_BO_WC, &args->handle);
}
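/*
 * For illustration only (values not taken from a particular caller): a
 * 1920x1080 request with bpp = 32 yields pitch = DIV_ROUND_UP(1920 * 32, 8) =
 * 7680 bytes and size = PAGE_ALIGN(7680 * 1080) = 8294400 bytes, which is
 * already a multiple of the 4 KiB page size.
 */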
/**
 * omap_gem_dumb_map_offset - buffer mapping for dumb interface
 * @file: our drm client file
 * @dev: drm device
 * @handle: GEM handle to the object (from dumb_create)
 * @offset: memory map offset placeholder
 *
 * Do the necessary setup to allow the mapping of the frame buffer
 * into user memory. We don't have to do much here at the moment.
 */
int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		u32 handle, u64 *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = omap_gem_mmap_offset(obj);

	drm_gem_object_put(obj);

fail:
	return ret;
}
#ifdef CONFIG_DRM_FBDEV_EMULATION
/* Set scrolling position. This allows us to implement fast scrolling
 * for console.
 *
 * Call only from non-atomic contexts.
 */
int omap_gem_roll(struct drm_gem_object *obj, u32 roll)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	u32 npages = obj->size >> PAGE_SHIFT;
	int ret = 0;

	if (roll > npages) {
		dev_err(obj->dev->dev, "invalid roll: %d\n", roll);
		return -EINVAL;
	}

	omap_obj->roll = roll;

	mutex_lock(&omap_obj->lock);

	/* if we aren't mapped yet, we don't need to do anything */
	if (omap_obj->block) {
		ret = omap_gem_attach_pages(obj);
		if (ret)
			goto fail;

		ret = tiler_pin(omap_obj->block, omap_obj->pages, npages,
				roll, true);
		if (ret)
			dev_err(obj->dev->dev, "could not repin: %d\n", ret);
	}

fail:
	mutex_unlock(&omap_obj->lock);

	return ret;
}
#endif
/* -----------------------------------------------------------------------------
 * Memory Management & DMA Sync
 */

/*
 * shmem buffers that are mapped cached are not coherent.
 *
 * We keep track of dirty pages using page faulting to perform cache management.
 * When a page is mapped to the CPU in read/write mode the device can't access
 * it and omap_obj->dma_addrs[i] is NULL. When a page is mapped to the device
 * the omap_obj->dma_addrs[i] is set to the DMA address, and the page is
 * unmapped from the CPU.
 */
static inline bool omap_gem_is_cached_coherent(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	return !((omap_obj->flags & OMAP_BO_MEM_SHMEM) &&
		((omap_obj->flags & OMAP_BO_CACHE_MASK) == OMAP_BO_CACHED));
}
/* Sync the buffer for CPU access.. note pages should already be
 * attached, ie. omap_gem_get_pages()
 */
void omap_gem_cpu_sync_page(struct drm_gem_object *obj, int pgoff)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	if (omap_gem_is_cached_coherent(obj))
		return;

	if (omap_obj->dma_addrs[pgoff]) {
		dma_unmap_page(dev->dev, omap_obj->dma_addrs[pgoff],
				PAGE_SIZE, DMA_TO_DEVICE);
		omap_obj->dma_addrs[pgoff] = 0;
	}
}
/* sync the buffer for DMA access */
void omap_gem_dma_sync_buffer(struct drm_gem_object *obj,
		enum dma_data_direction dir)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int i, npages = obj->size >> PAGE_SHIFT;
	struct page **pages = omap_obj->pages;
	bool dirty = false;

	if (omap_gem_is_cached_coherent(obj))
		return;

	for (i = 0; i < npages; i++) {
		if (!omap_obj->dma_addrs[i]) {
			dma_addr_t addr;

			addr = dma_map_page(dev->dev, pages[i], 0,
					    PAGE_SIZE, dir);
			if (dma_mapping_error(dev->dev, addr)) {
				dev_warn(dev->dev, "%s: failed to map page\n",
					__func__);
				break;
			}

			dirty = true;
			omap_obj->dma_addrs[i] = addr;
		}
	}

	if (dirty) {
		unmap_mapping_range(obj->filp->f_mapping, 0,
				    omap_gem_mmap_size(obj), 1);
	}
}
/**
 * omap_gem_pin() - Pin a GEM object in memory
 * @obj: the GEM object
 * @dma_addr: the DMA address
 *
 * Pin the given GEM object in memory and fill the dma_addr pointer with the
 * object's DMA address. If the buffer is not physically contiguous it will be
 * remapped through the TILER to provide a contiguous view.
 *
 * Pins are reference-counted, calling this function multiple times is allowed
 * as long the corresponding omap_gem_unpin() calls are balanced.
 *
 * Return 0 on success or a negative error code otherwise.
 */
int omap_gem_pin(struct drm_gem_object *obj, dma_addr_t *dma_addr)
{
	struct omap_drm_private *priv = obj->dev->dev_private;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	mutex_lock(&omap_obj->lock);

	if (!omap_gem_is_contiguous(omap_obj) && priv->has_dmm) {
		if (refcount_read(&omap_obj->dma_addr_cnt) == 0) {
			u32 npages = obj->size >> PAGE_SHIFT;
			enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
			struct tiler_block *block;

			BUG_ON(omap_obj->block);

			refcount_set(&omap_obj->dma_addr_cnt, 1);

			ret = omap_gem_attach_pages(obj);
			if (ret)
				goto fail;

			if (omap_obj->flags & OMAP_BO_TILED_MASK) {
				block = tiler_reserve_2d(fmt,
						omap_obj->width,
						omap_obj->height, 0);
			} else {
				block = tiler_reserve_1d(obj->size);
			}

			if (IS_ERR(block)) {
				ret = PTR_ERR(block);
				dev_err(obj->dev->dev,
					"could not remap: %d (%d)\n", ret, fmt);
				goto fail;
			}

			/* TODO: enable async refill.. */
			ret = tiler_pin(block, omap_obj->pages, npages,
					omap_obj->roll, true);
			if (ret) {
				tiler_release(block);
				dev_err(obj->dev->dev,
						"could not pin: %d\n", ret);
				goto fail;
			}

			omap_obj->dma_addr = tiler_ssptr(block);
			omap_obj->block = block;

			DBG("got dma address: %pad", &omap_obj->dma_addr);
		} else {
			refcount_inc(&omap_obj->dma_addr_cnt);
		}

		if (dma_addr)
			*dma_addr = omap_obj->dma_addr;
	} else if (omap_gem_is_contiguous(omap_obj)) {
		if (dma_addr)
			*dma_addr = omap_obj->dma_addr;
	} else {
		ret = -EINVAL;
		goto fail;
	}

fail:
	mutex_unlock(&omap_obj->lock);

	return ret;
}
/**
 * omap_gem_unpin_locked() - Unpin a GEM object from memory
 * @obj: the GEM object
 *
 * omap_gem_unpin() without locking.
 */
static void omap_gem_unpin_locked(struct drm_gem_object *obj)
{
	struct omap_drm_private *priv = obj->dev->dev_private;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret;

	if (omap_gem_is_contiguous(omap_obj) || !priv->has_dmm)
		return;

	if (refcount_dec_and_test(&omap_obj->dma_addr_cnt)) {
		ret = tiler_unpin(omap_obj->block);
		if (ret)
			dev_err(obj->dev->dev,
				"could not unpin pages: %d\n", ret);
		ret = tiler_release(omap_obj->block);
		if (ret)
			dev_err(obj->dev->dev,
				"could not release unmap: %d\n", ret);
		omap_obj->dma_addr = 0;
		omap_obj->block = NULL;
	}
}
/**
 * omap_gem_unpin() - Unpin a GEM object from memory
 * @obj: the GEM object
 *
 * Unpin the given GEM object previously pinned with omap_gem_pin(). Pins are
 * reference-counted, the actual unpin will only be performed when the number
 * of calls to this function matches the number of calls to omap_gem_pin().
 */
void omap_gem_unpin(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	mutex_lock(&omap_obj->lock);
	omap_gem_unpin_locked(obj);
	mutex_unlock(&omap_obj->lock);
}
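/*
 * Illustrative pin/unpin usage (a sketch, not lifted from a specific caller;
 * the local names are hypothetical):
 *
 *	dma_addr_t dma_addr;
 *	int ret = omap_gem_pin(obj, &dma_addr);
 *	if (ret)
 *		return ret;
 *	... program dma_addr into the display or DMA consumer ...
 *	omap_gem_unpin(obj);
 */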
/* Get rotated scanout address (only valid if already pinned), at the
 * specified orientation and x,y offset from top-left corner of buffer
 * (only valid for tiled 2d buffers)
 */
int omap_gem_rotated_dma_addr(struct drm_gem_object *obj, u32 orient,
		int x, int y, dma_addr_t *dma_addr)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = -EINVAL;

	mutex_lock(&omap_obj->lock);

	if ((refcount_read(&omap_obj->dma_addr_cnt) > 0) && omap_obj->block &&
			(omap_obj->flags & OMAP_BO_TILED_MASK)) {
		*dma_addr = tiler_tsptr(omap_obj->block, orient, x, y);
		ret = 0;
	}

	mutex_unlock(&omap_obj->lock);

	return ret;
}
/* Get tiler stride for the buffer (only valid for 2d tiled buffers) */
int omap_gem_tiled_stride(struct drm_gem_object *obj, u32 orient)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = -EINVAL;

	if (omap_obj->flags & OMAP_BO_TILED_MASK)
		ret = tiler_stride(gem2fmt(omap_obj->flags), orient);

	return ret;
}
/* if !remap, and we don't have pages backing, then fail, rather than
 * increasing the pin count (which we don't really do yet anyways,
 * because we don't support swapping pages back out). And 'remap'
 * might not be quite the right name, but I wanted to keep it working
 * similarly to omap_gem_pin(). Note though that mutex is not
 * acquired if !remap (because this can be called in atomic ctxt),
 * but probably omap_gem_unpin() should be changed to work in the
 * same way. If !remap, a matching omap_gem_put_pages() call is not
 * required (and should not be made).
 */
int omap_gem_get_pages(struct drm_gem_object *obj, struct page ***pages,
		bool remap)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	mutex_lock(&omap_obj->lock);

	if (remap) {
		ret = omap_gem_attach_pages(obj);
		if (ret)
			goto unlock;
	}

	if (!omap_obj->pages) {
		ret = -ENOMEM;
		goto unlock;
	}

	*pages = omap_obj->pages;

unlock:
	mutex_unlock(&omap_obj->lock);

	return ret;
}
/* release pages when DMA no longer being performed */
int omap_gem_put_pages(struct drm_gem_object *obj)
{
	/* do something here if we dynamically attach/detach pages.. at
	 * least they would no longer need to be pinned if everyone has
	 * released the pages..
	 */
	return 0;
}
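/*
 * Illustrative only (not taken from a specific caller): users that need the
 * backing pages pair the two calls, e.g.
 *
 *	struct page **pages;
 *	int ret = omap_gem_get_pages(obj, &pages, true);
 *	if (ret)
 *		return ret;
 *	... access pages[] ...
 *	omap_gem_put_pages(obj);
 */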
#ifdef CONFIG_DRM_FBDEV_EMULATION
/*
 * Get kernel virtual address for CPU access.. this more or less only
 * exists for omap_fbdev.
 */
void *omap_gem_vaddr(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	void *vaddr;
	int ret;

	mutex_lock(&omap_obj->lock);

	if (!omap_obj->vaddr) {
		ret = omap_gem_attach_pages(obj);
		if (ret) {
			vaddr = ERR_PTR(ret);
			goto unlock;
		}

		omap_obj->vaddr = vmap(omap_obj->pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
	}

	vaddr = omap_obj->vaddr;

unlock:
	mutex_unlock(&omap_obj->lock);
	return vaddr;
}
#endif
/* -----------------------------------------------------------------------------
 * Power Management
 */

#ifdef CONFIG_PM
/* re-pin objects in DMM in resume path: */
int omap_gem_resume(struct drm_device *dev)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_gem_object *omap_obj;
	int ret = 0;

	mutex_lock(&priv->list_lock);
	list_for_each_entry(omap_obj, &priv->obj_list, mm_list) {
		if (omap_obj->block) {
			struct drm_gem_object *obj = &omap_obj->base;
			u32 npages = obj->size >> PAGE_SHIFT;

			WARN_ON(!omap_obj->pages);	/* this can't happen */
			ret = tiler_pin(omap_obj->block,
					omap_obj->pages, npages,
					omap_obj->roll, true);
			if (ret) {
				dev_err(dev->dev, "could not repin: %d\n", ret);
				goto done;
			}
		}
	}

done:
	mutex_unlock(&priv->list_lock);
	return ret;
}
#endif
/* -----------------------------------------------------------------------------
 * DebugFS
 */

#ifdef CONFIG_DEBUG_FS
void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	u64 off;

	off = drm_vma_node_start(&obj->vma_node);

	mutex_lock(&omap_obj->lock);

	seq_printf(m, "%08x: %2d (%2d) %08llx %pad (%2d) %p %4d",
			omap_obj->flags, obj->name, kref_read(&obj->refcount),
			off, &omap_obj->dma_addr,
			refcount_read(&omap_obj->dma_addr_cnt),
			omap_obj->vaddr, omap_obj->roll);

	if (omap_obj->flags & OMAP_BO_TILED_MASK) {
		seq_printf(m, " %dx%d", omap_obj->width, omap_obj->height);
		if (omap_obj->block) {
			struct tcm_area *area = &omap_obj->block->area;
			seq_printf(m, " (%dx%d, %dx%d)",
					area->p0.x, area->p0.y,
					area->p1.x, area->p1.y);
		}
	} else {
		seq_printf(m, " %zu", obj->size);
	}

	mutex_unlock(&omap_obj->lock);

	seq_printf(m, "\n");
}
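/* Describe each object on the given list and print a summary line. */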
void omap_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct omap_gem_object *omap_obj;
	int count = 0;
	size_t size = 0;

	list_for_each_entry(omap_obj, list, mm_list) {
		struct drm_gem_object *obj = &omap_obj->base;

		omap_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif
/* -----------------------------------------------------------------------------
 * Constructor & Destructor
 */

static void omap_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	omap_gem_evict(obj);

	mutex_lock(&priv->list_lock);
	list_del(&omap_obj->mm_list);
	mutex_unlock(&priv->list_lock);

	/*
	 * We own the sole reference to the object at this point, but to keep
	 * lockdep happy, we must still take the omap_obj->lock to call
	 * omap_gem_detach_pages(). This should hardly make any difference as
	 * there can't be any lock contention.
	 */
	mutex_lock(&omap_obj->lock);

	/* The object should not be pinned. */
	WARN_ON(refcount_read(&omap_obj->dma_addr_cnt) > 0);

	if (omap_obj->pages) {
		if (omap_obj->flags & OMAP_BO_MEM_DMABUF)
			kfree(omap_obj->pages);
		else
			omap_gem_detach_pages(obj);
	}

	if (omap_obj->flags & OMAP_BO_MEM_DMA_API) {
		dma_free_wc(dev->dev, obj->size, omap_obj->vaddr,
			    omap_obj->dma_addr);
	} else if (omap_obj->vaddr) {
		vunmap(omap_obj->vaddr);
	} else if (obj->import_attach) {
		drm_prime_gem_destroy(obj, omap_obj->sgt);
	}

	mutex_unlock(&omap_obj->lock);

	drm_gem_object_release(obj);

	mutex_destroy(&omap_obj->lock);

	kfree(omap_obj);
}
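/*
 * Check that the buffer flags requested by userspace form a valid combination
 * (cache mode and, for tiled buffers, an available TILER and a known format).
 */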
static bool omap_gem_validate_flags(struct drm_device *dev, u32 flags)
{
	struct omap_drm_private *priv = dev->dev_private;

	switch (flags & OMAP_BO_CACHE_MASK) {
	case OMAP_BO_CACHED:
	case OMAP_BO_WC:
	case OMAP_BO_CACHE_MASK:
		break;

	default:
		return false;
	}

	if (flags & OMAP_BO_TILED_MASK) {
		if (!priv->usergart)
			return false;

		switch (flags & OMAP_BO_TILED_MASK) {
		case OMAP_BO_TILED_8:
		case OMAP_BO_TILED_16:
		case OMAP_BO_TILED_32:
			break;

		default:
			return false;
		}
	}

	return true;
}
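/* VM operations used for userspace mmaps of GEM objects (see omap_gem_fault). */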
static const struct vm_operations_struct omap_gem_vm_ops = {
	.fault = omap_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static const struct drm_gem_object_funcs omap_gem_object_funcs = {
	.free = omap_gem_free_object,
	.export = omap_gem_prime_export,
	.vm_ops = &omap_gem_vm_ops,
};
/* GEM buffer object constructor */
struct drm_gem_object *omap_gem_new(struct drm_device *dev,
		union omap_gem_size gsize, u32 flags)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_gem_object *omap_obj;
	struct drm_gem_object *obj;
	struct address_space *mapping;
	size_t size;
	int ret;

	if (!omap_gem_validate_flags(dev, flags))
		return NULL;

	/* Validate the flags and compute the memory and cache flags. */
	if (flags & OMAP_BO_TILED_MASK) {
		/*
		 * Tiled buffers are always shmem paged backed. When they are
		 * scanned out, they are remapped into DMM/TILER.
		 */
		flags |= OMAP_BO_MEM_SHMEM;

		/*
		 * Currently don't allow cached buffers. There is some caching
		 * stuff that needs to be handled better.
		 */
		flags &= ~(OMAP_BO_CACHED|OMAP_BO_WC|OMAP_BO_UNCACHED);
		flags |= tiler_get_cpu_cache_flags();
	} else if ((flags & OMAP_BO_SCANOUT) && !priv->has_dmm) {
		/*
		 * If we don't have DMM, we must allocate scanout buffers
		 * from contiguous DMA memory.
		 */
		flags |= OMAP_BO_MEM_DMA_API;
	} else if (!(flags & OMAP_BO_MEM_DMABUF)) {
		/*
		 * All other buffers not backed by dma_buf are shmem-backed.
		 */
		flags |= OMAP_BO_MEM_SHMEM;
	}

	/* Allocate and initialize the OMAP GEM object. */
	omap_obj = kzalloc(sizeof(*omap_obj), GFP_KERNEL);
	if (!omap_obj)
		return NULL;

	obj = &omap_obj->base;
	omap_obj->flags = flags;
	mutex_init(&omap_obj->lock);

	if (flags & OMAP_BO_TILED_MASK) {
		/*
		 * For tiled buffers align dimensions to slot boundaries and
		 * calculate size based on aligned dimensions.
		 */
		tiler_align(gem2fmt(flags), &gsize.tiled.width,
			    &gsize.tiled.height);

		size = tiler_size(gem2fmt(flags), gsize.tiled.width,
				  gsize.tiled.height);

		omap_obj->width = gsize.tiled.width;
		omap_obj->height = gsize.tiled.height;
	} else {
		size = PAGE_ALIGN(gsize.bytes);
	}

	obj->funcs = &omap_gem_object_funcs;

	/* Initialize the GEM object. */
	if (!(flags & OMAP_BO_MEM_SHMEM)) {
		drm_gem_private_object_init(dev, obj, size);
	} else {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto err_free;

		mapping = obj->filp->f_mapping;
		mapping_set_gfp_mask(mapping, GFP_USER | __GFP_DMA32);
	}

	/* Allocate memory if needed. */
	if (flags & OMAP_BO_MEM_DMA_API) {
		omap_obj->vaddr = dma_alloc_wc(dev->dev, size,
					       &omap_obj->dma_addr,
					       GFP_KERNEL);
		if (!omap_obj->vaddr)
			goto err_release;
	}

	mutex_lock(&priv->list_lock);
	list_add(&omap_obj->mm_list, &priv->obj_list);
	mutex_unlock(&priv->list_lock);

	return obj;

err_release:
	drm_gem_object_release(obj);
err_free:
	kfree(omap_obj);
	return NULL;
}
struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size,
		struct sg_table *sgt)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_gem_object *omap_obj;
	struct drm_gem_object *obj;
	union omap_gem_size gsize;

	/* Without a DMM only physically contiguous buffers can be supported. */
	if (sgt->orig_nents != 1 && !priv->has_dmm)
		return ERR_PTR(-EINVAL);

	gsize.bytes = PAGE_ALIGN(size);
	obj = omap_gem_new(dev, gsize, OMAP_BO_MEM_DMABUF | OMAP_BO_WC);
	if (!obj)
		return ERR_PTR(-ENOMEM);

	omap_obj = to_omap_bo(obj);

	mutex_lock(&omap_obj->lock);

	omap_obj->sgt = sgt;

	if (sgt->orig_nents == 1) {
		omap_obj->dma_addr = sg_dma_address(sgt->sgl);
	} else {
		/* Create pages list from sgt */
		struct page **pages;
		unsigned int npages;
		int ret;

		npages = DIV_ROUND_UP(size, PAGE_SIZE);
		pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
		if (!pages) {
			omap_gem_free_object(obj);
			obj = ERR_PTR(-ENOMEM);
			goto done;
		}

		omap_obj->pages = pages;
		ret = drm_prime_sg_to_page_addr_arrays(sgt, pages, NULL,
						       npages);
		if (ret) {
			omap_gem_free_object(obj);
			obj = ERR_PTR(-ENOMEM);
			goto done;
		}
	}

done:
	mutex_unlock(&omap_obj->lock);
	return obj;
}
/* convenience method to construct a GEM buffer object, and userspace handle */
int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		union omap_gem_size gsize, u32 flags, u32 *handle)
{
	struct drm_gem_object *obj;
	int ret;

	obj = omap_gem_new(dev, gsize, flags);
	if (!obj)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, obj, handle);
	if (ret) {
		omap_gem_free_object(obj);
		return ret;
	}

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(obj);

	return 0;
}
/* -----------------------------------------------------------------------------
 * Init & Cleanup
 */

/* If DMM is used, we need to set some stuff up.. */
void omap_gem_init(struct drm_device *dev)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_drm_usergart *usergart;
	const enum tiler_fmt fmts[] = {
			TILFMT_8BIT, TILFMT_16BIT, TILFMT_32BIT
	};
	int i, j;

	if (!dmm_is_available()) {
		/* DMM only supported on OMAP4 and later, so this isn't fatal */
		dev_warn(dev->dev, "DMM not available, disable DMM support\n");
		return;
	}

	usergart = kcalloc(3, sizeof(*usergart), GFP_KERNEL);
	if (!usergart)
		return;

	/* reserve 4k aligned/wide regions for userspace mappings: */
	for (i = 0; i < ARRAY_SIZE(fmts); i++) {
		u16 h = 1, w = PAGE_SIZE >> i;

		tiler_align(fmts[i], &w, &h);
		/* note: since each region is 1 4kb page wide, and minimum
		 * number of rows, the height ends up being the same as the
		 * # of pages in the region
		 */
		usergart[i].height = h;
		usergart[i].height_shift = ilog2(h);
		usergart[i].stride_pfn = tiler_stride(fmts[i], 0) >> PAGE_SHIFT;
		usergart[i].slot_shift = ilog2((PAGE_SIZE / h) >> i);
		for (j = 0; j < NUM_USERGART_ENTRIES; j++) {
			struct omap_drm_usergart_entry *entry;
			struct tiler_block *block;

			entry = &usergart[i].entry[j];
			block = tiler_reserve_2d(fmts[i], w, h, PAGE_SIZE);
			if (IS_ERR(block)) {
				dev_err(dev->dev,
					"reserve failed: %d, %d, %ld\n",
					i, j, PTR_ERR(block));
				return;
			}
			entry->dma_addr = tiler_ssptr(block);
			entry->block = block;

			DBG("%d:%d: %dx%d: dma_addr=%pad stride=%d", i, j, w, h,
					&entry->dma_addr,
					usergart[i].stride_pfn << PAGE_SHIFT);
		}
	}

	priv->usergart = usergart;
	priv->has_dmm = true;
}
void omap_gem_deinit(struct drm_device *dev)
{
	struct omap_drm_private *priv = dev->dev_private;

	/* I believe we can rely on there being no more outstanding GEM
	 * objects which could depend on usergart/dmm at this point.
	 */
	kfree(priv->usergart);
}