2 * drm kms/fb cma (contiguous memory allocator) helper functions
4 * Copyright (C) 2012 Analog Device Inc.
8 * Copyright (C) 2012 Red Hat
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version 2
13 * of the License, or (at your option) any later version.
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
21 #include <drm/drm_fb_helper.h>
22 #include <drm/drm_framebuffer.h>
23 #include <drm/drm_gem_cma_helper.h>
24 #include <drm/drm_gem_framebuffer_helper.h>
25 #include <drm/drm_fb_cma_helper.h>
26 #include <linux/module.h>
28 #define DEFAULT_FBDEFIO_DELAY_MS 50
/* Per-device fbdev emulation state for CMA-backed drivers. */
30 struct drm_fbdev_cma {
31 struct drm_fb_helper fb_helper; /* embedded; to_fbdev_cma() relies on container_of() over this member */
32 const struct drm_framebuffer_funcs *fb_funcs; /* driver fb funcs; a non-NULL .dirty enables deferred io */
36 * DOC: framebuffer cma helper functions
38 * Provides helper functions for creating a cma (contiguous memory allocator)
41 * drm_gem_fb_create() is used in the &drm_mode_config_funcs.fb_create
42 * callback function to create a cma backed framebuffer.
44 * An fbdev framebuffer backed by cma is also available by calling
45 * drm_fbdev_cma_init(). drm_fbdev_cma_fini() tears it down.
46 * If the &drm_framebuffer_funcs.dirty callback is set, fb_deferred_io will be
47 * set up automatically. &drm_framebuffer_funcs.dirty is called by
48 * drm_fb_helper_deferred_io() in process context (&struct delayed_work).
50 * Example fbdev deferred io code::
52 * static int driver_fb_dirty(struct drm_framebuffer *fb,
53 * struct drm_file *file_priv,
54 * unsigned flags, unsigned color,
55 * struct drm_clip_rect *clips,
58 * struct drm_gem_cma_object *cma = drm_fb_cma_get_gem_obj(fb, 0);
59 * ... push changes ...
63 * static struct drm_framebuffer_funcs driver_fb_funcs = {
64 * .destroy = drm_gem_fb_destroy,
65 * .create_handle = drm_gem_fb_create_handle,
66 * .dirty = driver_fb_dirty,
71 * fbdev = drm_fbdev_cma_init_with_funcs(dev, 16,
72 * dev->mode_config.num_crtc,
73 * dev->mode_config.num_connector,
/* Upcast from the embedded fb_helper to its enclosing drm_fbdev_cma. */
78 static inline struct drm_fbdev_cma *to_fbdev_cma(struct drm_fb_helper *helper)
80 return container_of(helper, struct drm_fbdev_cma, fb_helper);
84 * drm_fb_cma_get_gem_obj() - Get CMA GEM object for framebuffer
85 * @fb: The framebuffer
88 * Return the CMA GEM object for given framebuffer.
90 * This function will usually be called from the CRTC callback functions.
92 struct drm_gem_cma_object *drm_fb_cma_get_gem_obj(struct drm_framebuffer *fb,
95 struct drm_gem_object *gem;
97 gem = drm_gem_fb_get_obj(fb, plane);
/*
 * NOTE(review): drm_gem_fb_get_obj() can return NULL for an unused plane
 * index; a NULL check appears to exist in the upstream original but is not
 * visible in this extract — confirm before relying on the cast below.
 */
101 return to_drm_gem_cma_obj(gem);
103 EXPORT_SYMBOL_GPL(drm_fb_cma_get_gem_obj);
106 * drm_fb_cma_get_gem_addr() - Get physical address for framebuffer
107 * @fb: The framebuffer
108 * @state: Which state of drm plane
109 * @plane: Which plane
110 * Return the CMA GEM address for given framebuffer.
112 * This function will usually be called from the PLANE callback functions.
114 dma_addr_t drm_fb_cma_get_gem_addr(struct drm_framebuffer *fb,
115 struct drm_plane_state *state,
118 struct drm_gem_cma_object *obj;
121 obj = drm_fb_cma_get_gem_obj(fb, plane);
/*
 * Start from the plane's base offset in the buffer, then add the source
 * crop: src_x/src_y are fixed-point, the >> 16 extracts the integer part.
 */
125 paddr = obj->paddr + fb->offsets[plane];
126 paddr += fb->format->cpp[plane] * (state->src_x >> 16); /* horizontal: bytes per pixel */
127 paddr += fb->pitches[plane] * (state->src_y >> 16); /* vertical: bytes per scanline */
131 EXPORT_SYMBOL_GPL(drm_fb_cma_get_gem_addr);
133 #ifdef CONFIG_DEBUG_FS
/* Dump one framebuffer (size, fourcc, per-plane offset/pitch/object) to @m. */
134 static void drm_fb_cma_describe(struct drm_framebuffer *fb, struct seq_file *m)
/* Print the fourcc by reading the u32 format code as four ASCII bytes. */
138 seq_printf(m, "fb: %dx%d@%4.4s\n", fb->width, fb->height,
139 (char *)&fb->format->format);
141 for (i = 0; i < fb->format->num_planes; i++) {
142 seq_printf(m, " %d: offset=%d pitch=%d, obj: ",
143 i, fb->offsets[i], fb->pitches[i]);
144 drm_gem_cma_describe(drm_fb_cma_get_gem_obj(fb, i), m);
149 * drm_fb_cma_debugfs_show() - Helper to list CMA framebuffer objects
152 * @arg: private data for the callback
154 int drm_fb_cma_debugfs_show(struct seq_file *m, void *arg)
156 struct drm_info_node *node = (struct drm_info_node *) m->private;
157 struct drm_device *dev = node->minor->dev;
158 struct drm_framebuffer *fb;
/* fb_lock guards the device framebuffer list while we walk and print it. */
160 mutex_lock(&dev->mode_config.fb_lock);
161 drm_for_each_fb(fb, dev)
162 drm_fb_cma_describe(fb, m);
163 mutex_unlock(&dev->mode_config.fb_lock);
167 EXPORT_SYMBOL_GPL(drm_fb_cma_debugfs_show);
/*
 * fb_mmap implementation: map the CMA buffer into userspace with
 * write-combined attributes (no deferred io in this path).
 */
170 static int drm_fb_cma_mmap(struct fb_info *info, struct vm_area_struct *vma)
172 return dma_mmap_writecombine(info->device, vma, info->screen_base,
173 info->fix.smem_start, info->fix.smem_len);
/*
 * Default fbdev ops. Not const: drm_fbdev_cma_defio_init() copies and
 * patches this table per-device when deferred io is enabled.
 */
176 static struct fb_ops drm_fbdev_cma_ops = {
177 .owner = THIS_MODULE,
178 DRM_FB_HELPER_DEFAULT_OPS,
179 .fb_fillrect = drm_fb_helper_sys_fillrect,
180 .fb_copyarea = drm_fb_helper_sys_copyarea,
181 .fb_imageblit = drm_fb_helper_sys_imageblit,
182 .fb_mmap = drm_fb_cma_mmap,
/*
 * fb_mmap used when deferred io is active: let fb_deferred_io_mmap() set up
 * page tracking, then force write-combined mappings for the CMA memory.
 */
185 static int drm_fbdev_cma_deferred_io_mmap(struct fb_info *info,
186 struct vm_area_struct *vma)
188 fb_deferred_io_mmap(info, vma);
189 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
/*
 * Set up fbdev deferred io for @fbi backed by @cma_obj: allocate per-device
 * fbdefio/fbops copies, register deferred io, and patch in our mmap.
 * Returns 0 on success or a negative errno (error path truncated in this
 * extract — presumably -ENOMEM with both allocations freed; verify upstream).
 */
194 static int drm_fbdev_cma_defio_init(struct fb_info *fbi,
195 struct drm_gem_cma_object *cma_obj)
197 struct fb_deferred_io *fbdefio;
198 struct fb_ops *fbops;
201 * Per device structures are needed because:
202 * fbops: fb_deferred_io_cleanup() clears fbops.fb_mmap
203 * fbdefio: individual delays
205 fbdefio = kzalloc(sizeof(*fbdefio), GFP_KERNEL);
206 fbops = kzalloc(sizeof(*fbops), GFP_KERNEL);
207 if (!fbdefio || !fbops) {
213 /* can't be offset from vaddr since dirty() uses cma_obj */
214 fbi->screen_buffer = cma_obj->vaddr;
215 /* fb_deferred_io_fault() needs a physical address */
216 fbi->fix.smem_start = page_to_phys(virt_to_page(fbi->screen_buffer));
/* Clone the shared ops table so per-device patching below is safe. */
218 *fbops = *fbi->fbops;
221 fbdefio->delay = msecs_to_jiffies(DEFAULT_FBDEFIO_DELAY_MS);
222 fbdefio->deferred_io = drm_fb_helper_deferred_io; /* flushes via &drm_framebuffer_funcs.dirty */
223 fbi->fbdefio = fbdefio;
224 fb_deferred_io_init(fbi);
225 fbi->fbops->fb_mmap = drm_fbdev_cma_deferred_io_mmap;
/* Tear down deferred io state created by drm_fbdev_cma_defio_init(). */
230 static void drm_fbdev_cma_defio_fini(struct fb_info *fbi)
235 fb_deferred_io_cleanup(fbi);
/*
 * &drm_fb_helper_funcs.fb_probe implementation: allocate a CMA buffer sized
 * for the requested surface, wrap it in a DRM framebuffer and an fbdev
 * fb_info, and optionally enable deferred io when the driver provides a
 * .dirty callback. Error paths unwind in reverse order via the labels below.
 */
241 drm_fbdev_cma_create(struct drm_fb_helper *helper,
242 struct drm_fb_helper_surface_size *sizes)
244 struct drm_fbdev_cma *fbdev_cma = to_fbdev_cma(helper);
245 struct drm_device *dev = helper->dev;
246 struct drm_gem_cma_object *obj;
247 struct drm_framebuffer *fb;
248 unsigned int bytes_per_pixel;
249 unsigned long offset;
254 DRM_DEBUG_KMS("surface width(%d), height(%d) and bpp(%d)\n",
255 sizes->surface_width, sizes->surface_height,
/* Round bpp up to whole bytes, then size the backing store accordingly. */
258 bytes_per_pixel = DIV_ROUND_UP(sizes->surface_bpp, 8);
259 size = sizes->surface_width * sizes->surface_height * bytes_per_pixel;
260 obj = drm_gem_cma_create(dev, size);
264 fbi = drm_fb_helper_alloc_fbi(helper);
267 goto err_gem_free_object;
270 fb = drm_gem_fbdev_fb_create(dev, sizes, 0, &obj->base,
271 fbdev_cma->fb_funcs);
273 dev_err(dev->dev, "Failed to allocate DRM framebuffer.\n");
275 goto err_fb_info_destroy;
281 fbi->flags = FBINFO_FLAG_DEFAULT;
282 fbi->fbops = &drm_fbdev_cma_ops;
284 drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->format->depth);
285 drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height);
/* Byte offset of the initial (xoffset, yoffset) panning position. */
287 offset = fbi->var.xoffset * bytes_per_pixel;
288 offset += fbi->var.yoffset * fb->pitches[0];
290 dev->mode_config.fb_base = (resource_size_t)obj->paddr;
291 fbi->screen_base = obj->vaddr + offset; /* CPU-visible mapping */
292 fbi->fix.smem_start = (unsigned long)(obj->paddr + offset); /* bus/DMA address */
293 fbi->screen_size = size;
294 fbi->fix.smem_len = size;
/* A driver-provided dirty() callback implies a deferred io framebuffer. */
296 if (fbdev_cma->fb_funcs->dirty) {
297 ret = drm_fbdev_cma_defio_init(fbi, obj);
299 goto err_cma_destroy;
/* Error unwinding: drop fb, fb helper, then the GEM object reference. */
305 drm_framebuffer_remove(fb);
307 drm_fb_helper_fini(helper);
309 drm_gem_object_put_unlocked(&obj->base);
/* fb helper vtable: only fb_probe is needed for CMA fbdev emulation. */
313 static const struct drm_fb_helper_funcs drm_fb_cma_helper_funcs = {
314 .fb_probe = drm_fbdev_cma_create,
318 * drm_fbdev_cma_init_with_funcs() - Allocate and initializes a drm_fbdev_cma struct
320 * @preferred_bpp: Preferred bits per pixel for the device
321 * @max_conn_count: Maximum number of connectors
322 * @funcs: fb helper functions, in particular a custom dirty() callback
324 * Returns a newly allocated drm_fbdev_cma struct or a ERR_PTR.
326 struct drm_fbdev_cma *drm_fbdev_cma_init_with_funcs(struct drm_device *dev,
327 unsigned int preferred_bpp, unsigned int max_conn_count,
328 const struct drm_framebuffer_funcs *funcs)
330 struct drm_fbdev_cma *fbdev_cma;
331 struct drm_fb_helper *helper;
334 fbdev_cma = kzalloc(sizeof(*fbdev_cma), GFP_KERNEL);
336 dev_err(dev->dev, "Failed to allocate drm fbdev.\n");
337 return ERR_PTR(-ENOMEM);
339 fbdev_cma->fb_funcs = funcs;
341 helper = &fbdev_cma->fb_helper;
/* Standard fb helper bring-up: prepare, init, scan connectors, set config. */
343 drm_fb_helper_prepare(dev, helper, &drm_fb_cma_helper_funcs);
345 ret = drm_fb_helper_init(dev, helper, max_conn_count);
347 dev_err(dev->dev, "Failed to initialize drm fb helper.\n");
351 ret = drm_fb_helper_single_add_all_connectors(helper);
353 dev_err(dev->dev, "Failed to add connectors.\n");
354 goto err_drm_fb_helper_fini;
/* Triggers fb_probe (drm_fbdev_cma_create) for the initial mode set. */
358 ret = drm_fb_helper_initial_config(helper, preferred_bpp);
360 dev_err(dev->dev, "Failed to set initial hw configuration.\n");
361 goto err_drm_fb_helper_fini;
366 err_drm_fb_helper_fini:
367 drm_fb_helper_fini(helper);
373 EXPORT_SYMBOL_GPL(drm_fbdev_cma_init_with_funcs);
/* Default framebuffer funcs (no dirty() callback, so no deferred io). */
375 static const struct drm_framebuffer_funcs drm_fb_cma_funcs = {
376 .destroy = drm_gem_fb_destroy,
377 .create_handle = drm_gem_fb_create_handle,
381 * drm_fbdev_cma_init() - Allocate and initializes a drm_fbdev_cma struct
383 * @preferred_bpp: Preferred bits per pixel for the device
384 * @max_conn_count: Maximum number of connectors
386 * Returns a newly allocated drm_fbdev_cma struct or a ERR_PTR.
388 struct drm_fbdev_cma *drm_fbdev_cma_init(struct drm_device *dev,
389 unsigned int preferred_bpp, unsigned int max_conn_count)
/* Convenience wrapper using the default (non-dirty) framebuffer funcs. */
391 return drm_fbdev_cma_init_with_funcs(dev, preferred_bpp,
395 EXPORT_SYMBOL_GPL(drm_fbdev_cma_init);
398 * drm_fbdev_cma_fini() - Free drm_fbdev_cma struct
399 * @fbdev_cma: The drm_fbdev_cma struct
401 void drm_fbdev_cma_fini(struct drm_fbdev_cma *fbdev_cma)
/* Unregister the fbdev first so no userspace access races the teardown. */
403 drm_fb_helper_unregister_fbi(&fbdev_cma->fb_helper);
404 if (fbdev_cma->fb_helper.fbdev)
405 drm_fbdev_cma_defio_fini(fbdev_cma->fb_helper.fbdev);
/* fb may be NULL if fb_probe never ran or failed. */
407 if (fbdev_cma->fb_helper.fb)
408 drm_framebuffer_remove(fbdev_cma->fb_helper.fb);
410 drm_fb_helper_fini(&fbdev_cma->fb_helper);
413 EXPORT_SYMBOL_GPL(drm_fbdev_cma_fini);
416 * drm_fbdev_cma_restore_mode() - Restores initial framebuffer mode
417 * @fbdev_cma: The drm_fbdev_cma struct, may be NULL
419 * This function is usually called from the &drm_driver.lastclose callback.
421 void drm_fbdev_cma_restore_mode(struct drm_fbdev_cma *fbdev_cma)
424 drm_fb_helper_restore_fbdev_mode_unlocked(&fbdev_cma->fb_helper);
426 EXPORT_SYMBOL_GPL(drm_fbdev_cma_restore_mode);
429 * drm_fbdev_cma_hotplug_event() - Poll for hotplug events
430 * @fbdev_cma: The drm_fbdev_cma struct, may be NULL
432 * This function is usually called from the &drm_mode_config.output_poll_changed
435 void drm_fbdev_cma_hotplug_event(struct drm_fbdev_cma *fbdev_cma)
438 drm_fb_helper_hotplug_event(&fbdev_cma->fb_helper);
440 EXPORT_SYMBOL_GPL(drm_fbdev_cma_hotplug_event);
443 * drm_fbdev_cma_set_suspend - wrapper around drm_fb_helper_set_suspend
444 * @fbdev_cma: The drm_fbdev_cma struct, may be NULL
445 * @state: desired state, zero to resume, non-zero to suspend
447 * Calls drm_fb_helper_set_suspend, which is a wrapper around
448 * fb_set_suspend implemented by fbdev core.
450 void drm_fbdev_cma_set_suspend(struct drm_fbdev_cma *fbdev_cma, bool state)
453 drm_fb_helper_set_suspend(&fbdev_cma->fb_helper, state);
455 EXPORT_SYMBOL(drm_fbdev_cma_set_suspend);
458 * drm_fbdev_cma_set_suspend_unlocked - wrapper around
459 * drm_fb_helper_set_suspend_unlocked
460 * @fbdev_cma: The drm_fbdev_cma struct, may be NULL
461 * @state: desired state, zero to resume, non-zero to suspend
463 * Calls drm_fb_helper_set_suspend_unlocked, which is a wrapper around
464 * fb_set_suspend implemented by fbdev core.
466 void drm_fbdev_cma_set_suspend_unlocked(struct drm_fbdev_cma *fbdev_cma,
470 drm_fb_helper_set_suspend_unlocked(&fbdev_cma->fb_helper,
473 EXPORT_SYMBOL(drm_fbdev_cma_set_suspend_unlocked);