]> Git Repo - linux.git/blob - drivers/gpu/drm/drm_fb_cma_helper.c
Merge branch 'work.sock_recvmsg' of git://git.kernel.org/pub/scm/linux/kernel/git...
[linux.git] / drivers / gpu / drm / drm_fb_cma_helper.c
1 /*
2  * drm kms/fb cma (contiguous memory allocator) helper functions
3  *
4  * Copyright (C) 2012 Analog Device Inc.
5  *   Author: Lars-Peter Clausen <[email protected]>
6  *
7  * Based on udl_fbdev.c
8  *  Copyright (C) 2012 Red Hat
9  *
10  * This program is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU General Public License
12  * as published by the Free Software Foundation; either version 2
13  * of the License, or (at your option) any later version.
14  * This program is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17  * GNU General Public License for more details.
18  */
19
20 #include <drm/drmP.h>
21 #include <drm/drm_fb_helper.h>
22 #include <drm/drm_framebuffer.h>
23 #include <drm/drm_gem_cma_helper.h>
24 #include <drm/drm_gem_framebuffer_helper.h>
25 #include <drm/drm_fb_cma_helper.h>
26 #include <linux/module.h>
27
28 #define DEFAULT_FBDEFIO_DELAY_MS 50
29
/* Per-device fbdev emulation state for CMA-backed framebuffers. */
struct drm_fbdev_cma {
	struct drm_fb_helper	fb_helper;	/* must be first: to_fbdev_cma() uses container_of() */
	const struct drm_framebuffer_funcs *fb_funcs;	/* funcs used when creating the fbdev fb */
};
34
35 /**
36  * DOC: framebuffer cma helper functions
37  *
38  * Provides helper functions for creating a cma (contiguous memory allocator)
39  * backed framebuffer.
40  *
41  * drm_gem_fb_create() is used in the &drm_mode_config_funcs.fb_create
42  * callback function to create a cma backed framebuffer.
43  *
44  * An fbdev framebuffer backed by cma is also available by calling
45  * drm_fbdev_cma_init(). drm_fbdev_cma_fini() tears it down.
46  * If the &drm_framebuffer_funcs.dirty callback is set, fb_deferred_io will be
47  * set up automatically. &drm_framebuffer_funcs.dirty is called by
48  * drm_fb_helper_deferred_io() in process context (&struct delayed_work).
49  *
50  * Example fbdev deferred io code::
51  *
52  *     static int driver_fb_dirty(struct drm_framebuffer *fb,
53  *                                struct drm_file *file_priv,
54  *                                unsigned flags, unsigned color,
55  *                                struct drm_clip_rect *clips,
56  *                                unsigned num_clips)
57  *     {
58  *         struct drm_gem_cma_object *cma = drm_fb_cma_get_gem_obj(fb, 0);
59  *         ... push changes ...
60  *         return 0;
61  *     }
62  *
63  *     static struct drm_framebuffer_funcs driver_fb_funcs = {
64  *         .destroy       = drm_gem_fb_destroy,
65  *         .create_handle = drm_gem_fb_create_handle,
66  *         .dirty         = driver_fb_dirty,
67  *     };
68  *
69  * Initialize::
70  *
 *     fbdev = drm_fbdev_cma_init_with_funcs(dev, 16,
 *                                           dev->mode_config.num_connector,
 *                                           &driver_fb_funcs);
 *
75  *
76  */
77
78 static inline struct drm_fbdev_cma *to_fbdev_cma(struct drm_fb_helper *helper)
79 {
80         return container_of(helper, struct drm_fbdev_cma, fb_helper);
81 }
82
83 /**
84  * drm_fb_cma_get_gem_obj() - Get CMA GEM object for framebuffer
85  * @fb: The framebuffer
86  * @plane: Which plane
87  *
88  * Return the CMA GEM object for given framebuffer.
89  *
90  * This function will usually be called from the CRTC callback functions.
91  */
92 struct drm_gem_cma_object *drm_fb_cma_get_gem_obj(struct drm_framebuffer *fb,
93                                                   unsigned int plane)
94 {
95         struct drm_gem_object *gem;
96
97         gem = drm_gem_fb_get_obj(fb, plane);
98         if (!gem)
99                 return NULL;
100
101         return to_drm_gem_cma_obj(gem);
102 }
103 EXPORT_SYMBOL_GPL(drm_fb_cma_get_gem_obj);
104
/**
 * drm_fb_cma_get_gem_addr() - Get physical address for framebuffer
 * @fb: The framebuffer
 * @state: Which state of drm plane
 * @plane: Which plane
 *
 * Return the CMA GEM address for given framebuffer, or 0 if the plane has
 * no backing object.
 *
 * This function will usually be called from the PLANE callback functions.
 */
dma_addr_t drm_fb_cma_get_gem_addr(struct drm_framebuffer *fb,
				   struct drm_plane_state *state,
				   unsigned int plane)
{
	struct drm_gem_cma_object *obj;
	dma_addr_t paddr;

	obj = drm_fb_cma_get_gem_obj(fb, plane);
	if (!obj)
		return 0;

	/* Base address of the plane within the backing CMA buffer. */
	paddr = obj->paddr + fb->offsets[plane];
	/* src_x/src_y are 16.16 fixed point; >> 16 yields whole pixels/lines. */
	paddr += fb->format->cpp[plane] * (state->src_x >> 16);
	paddr += fb->pitches[plane] * (state->src_y >> 16);

	return paddr;
}
EXPORT_SYMBOL_GPL(drm_fb_cma_get_gem_addr);
132
133 #ifdef CONFIG_DEBUG_FS
134 static void drm_fb_cma_describe(struct drm_framebuffer *fb, struct seq_file *m)
135 {
136         int i;
137
138         seq_printf(m, "fb: %dx%d@%4.4s\n", fb->width, fb->height,
139                         (char *)&fb->format->format);
140
141         for (i = 0; i < fb->format->num_planes; i++) {
142                 seq_printf(m, "   %d: offset=%d pitch=%d, obj: ",
143                                 i, fb->offsets[i], fb->pitches[i]);
144                 drm_gem_cma_describe(drm_fb_cma_get_gem_obj(fb, i), m);
145         }
146 }
147
148 /**
149  * drm_fb_cma_debugfs_show() - Helper to list CMA framebuffer objects
150  *                             in debugfs.
151  * @m: output file
152  * @arg: private data for the callback
153  */
154 int drm_fb_cma_debugfs_show(struct seq_file *m, void *arg)
155 {
156         struct drm_info_node *node = (struct drm_info_node *) m->private;
157         struct drm_device *dev = node->minor->dev;
158         struct drm_framebuffer *fb;
159
160         mutex_lock(&dev->mode_config.fb_lock);
161         drm_for_each_fb(fb, dev)
162                 drm_fb_cma_describe(fb, m);
163         mutex_unlock(&dev->mode_config.fb_lock);
164
165         return 0;
166 }
167 EXPORT_SYMBOL_GPL(drm_fb_cma_debugfs_show);
168 #endif
169
/* fb_mmap implementation: map the CMA buffer write-combined into userspace. */
static int drm_fb_cma_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
	return dma_mmap_writecombine(info->device, vma, info->screen_base,
				     info->fix.smem_start, info->fix.smem_len);
}
175
/* Default fbdev ops; fb_mmap is overridden when deferred I/O is enabled. */
static struct fb_ops drm_fbdev_cma_ops = {
	.owner		= THIS_MODULE,
	DRM_FB_HELPER_DEFAULT_OPS,
	.fb_fillrect	= drm_fb_helper_sys_fillrect,
	.fb_copyarea	= drm_fb_helper_sys_copyarea,
	.fb_imageblit	= drm_fb_helper_sys_imageblit,
	.fb_mmap	= drm_fb_cma_mmap,
};
184
/*
 * fb_mmap for the deferred-I/O path: let fb_deferred_io_mmap() install the
 * fault handlers, then force a write-combined mapping on top of the
 * protection bits it set up.
 */
static int drm_fbdev_cma_deferred_io_mmap(struct fb_info *info,
					  struct vm_area_struct *vma)
{
	fb_deferred_io_mmap(info, vma);
	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	return 0;
}
193
/*
 * Enable fbdev deferred I/O on @fbi, backed by @cma_obj.
 *
 * Allocates per-device fb_deferred_io and fb_ops copies, points the fbdev
 * screen at the CMA buffer, and installs the deferred-io mmap handler.
 * Returns 0 on success or -ENOMEM.
 */
static int drm_fbdev_cma_defio_init(struct fb_info *fbi,
				    struct drm_gem_cma_object *cma_obj)
{
	struct fb_deferred_io *fbdefio;
	struct fb_ops *fbops;

	/*
	 * Per device structures are needed because:
	 * fbops: fb_deferred_io_cleanup() clears fbops.fb_mmap
	 * fbdefio: individual delays
	 */
	fbdefio = kzalloc(sizeof(*fbdefio), GFP_KERNEL);
	fbops = kzalloc(sizeof(*fbops), GFP_KERNEL);
	if (!fbdefio || !fbops) {
		/* kfree(NULL) is a no-op, so partial allocation is handled. */
		kfree(fbdefio);
		kfree(fbops);
		return -ENOMEM;
	}

	/* can't be offset from vaddr since dirty() uses cma_obj */
	fbi->screen_buffer = cma_obj->vaddr;
	/* fb_deferred_io_fault() needs a physical address */
	fbi->fix.smem_start = page_to_phys(virt_to_page(fbi->screen_buffer));

	/* Copy the shared ops so this device's fb_mmap can be patched. */
	*fbops = *fbi->fbops;
	fbi->fbops = fbops;

	fbdefio->delay = msecs_to_jiffies(DEFAULT_FBDEFIO_DELAY_MS);
	fbdefio->deferred_io = drm_fb_helper_deferred_io;
	fbi->fbdefio = fbdefio;
	fb_deferred_io_init(fbi);
	/* Install after fb_deferred_io_init() so our handler is the one used. */
	fbi->fbops->fb_mmap = drm_fbdev_cma_deferred_io_mmap;

	return 0;
}
229
230 static void drm_fbdev_cma_defio_fini(struct fb_info *fbi)
231 {
232         if (!fbi->fbdefio)
233                 return;
234
235         fb_deferred_io_cleanup(fbi);
236         kfree(fbi->fbdefio);
237         kfree(fbi->fbops);
238 }
239
240 static int
241 drm_fbdev_cma_create(struct drm_fb_helper *helper,
242         struct drm_fb_helper_surface_size *sizes)
243 {
244         struct drm_fbdev_cma *fbdev_cma = to_fbdev_cma(helper);
245         struct drm_device *dev = helper->dev;
246         struct drm_gem_cma_object *obj;
247         struct drm_framebuffer *fb;
248         unsigned int bytes_per_pixel;
249         unsigned long offset;
250         struct fb_info *fbi;
251         size_t size;
252         int ret;
253
254         DRM_DEBUG_KMS("surface width(%d), height(%d) and bpp(%d)\n",
255                         sizes->surface_width, sizes->surface_height,
256                         sizes->surface_bpp);
257
258         bytes_per_pixel = DIV_ROUND_UP(sizes->surface_bpp, 8);
259         size = sizes->surface_width * sizes->surface_height * bytes_per_pixel;
260         obj = drm_gem_cma_create(dev, size);
261         if (IS_ERR(obj))
262                 return -ENOMEM;
263
264         fbi = drm_fb_helper_alloc_fbi(helper);
265         if (IS_ERR(fbi)) {
266                 ret = PTR_ERR(fbi);
267                 goto err_gem_free_object;
268         }
269
270         fb = drm_gem_fbdev_fb_create(dev, sizes, 0, &obj->base,
271                                      fbdev_cma->fb_funcs);
272         if (IS_ERR(fb)) {
273                 dev_err(dev->dev, "Failed to allocate DRM framebuffer.\n");
274                 ret = PTR_ERR(fb);
275                 goto err_fb_info_destroy;
276         }
277
278         helper->fb = fb;
279
280         fbi->par = helper;
281         fbi->flags = FBINFO_FLAG_DEFAULT;
282         fbi->fbops = &drm_fbdev_cma_ops;
283
284         drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->format->depth);
285         drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height);
286
287         offset = fbi->var.xoffset * bytes_per_pixel;
288         offset += fbi->var.yoffset * fb->pitches[0];
289
290         dev->mode_config.fb_base = (resource_size_t)obj->paddr;
291         fbi->screen_base = obj->vaddr + offset;
292         fbi->fix.smem_start = (unsigned long)(obj->paddr + offset);
293         fbi->screen_size = size;
294         fbi->fix.smem_len = size;
295
296         if (fbdev_cma->fb_funcs->dirty) {
297                 ret = drm_fbdev_cma_defio_init(fbi, obj);
298                 if (ret)
299                         goto err_cma_destroy;
300         }
301
302         return 0;
303
304 err_cma_destroy:
305         drm_framebuffer_remove(fb);
306 err_fb_info_destroy:
307         drm_fb_helper_fini(helper);
308 err_gem_free_object:
309         drm_gem_object_put_unlocked(&obj->base);
310         return ret;
311 }
312
/* fb helper vtable: only fb_probe is needed for CMA fbdev emulation. */
static const struct drm_fb_helper_funcs drm_fb_cma_helper_funcs = {
	.fb_probe = drm_fbdev_cma_create,
};
316
317 /**
318  * drm_fbdev_cma_init_with_funcs() - Allocate and initializes a drm_fbdev_cma struct
319  * @dev: DRM device
320  * @preferred_bpp: Preferred bits per pixel for the device
321  * @max_conn_count: Maximum number of connectors
322  * @funcs: fb helper functions, in particular a custom dirty() callback
323  *
324  * Returns a newly allocated drm_fbdev_cma struct or a ERR_PTR.
325  */
326 struct drm_fbdev_cma *drm_fbdev_cma_init_with_funcs(struct drm_device *dev,
327         unsigned int preferred_bpp, unsigned int max_conn_count,
328         const struct drm_framebuffer_funcs *funcs)
329 {
330         struct drm_fbdev_cma *fbdev_cma;
331         struct drm_fb_helper *helper;
332         int ret;
333
334         fbdev_cma = kzalloc(sizeof(*fbdev_cma), GFP_KERNEL);
335         if (!fbdev_cma) {
336                 dev_err(dev->dev, "Failed to allocate drm fbdev.\n");
337                 return ERR_PTR(-ENOMEM);
338         }
339         fbdev_cma->fb_funcs = funcs;
340
341         helper = &fbdev_cma->fb_helper;
342
343         drm_fb_helper_prepare(dev, helper, &drm_fb_cma_helper_funcs);
344
345         ret = drm_fb_helper_init(dev, helper, max_conn_count);
346         if (ret < 0) {
347                 dev_err(dev->dev, "Failed to initialize drm fb helper.\n");
348                 goto err_free;
349         }
350
351         ret = drm_fb_helper_single_add_all_connectors(helper);
352         if (ret < 0) {
353                 dev_err(dev->dev, "Failed to add connectors.\n");
354                 goto err_drm_fb_helper_fini;
355
356         }
357
358         ret = drm_fb_helper_initial_config(helper, preferred_bpp);
359         if (ret < 0) {
360                 dev_err(dev->dev, "Failed to set initial hw configuration.\n");
361                 goto err_drm_fb_helper_fini;
362         }
363
364         return fbdev_cma;
365
366 err_drm_fb_helper_fini:
367         drm_fb_helper_fini(helper);
368 err_free:
369         kfree(fbdev_cma);
370
371         return ERR_PTR(ret);
372 }
373 EXPORT_SYMBOL_GPL(drm_fbdev_cma_init_with_funcs);
374
/* Default framebuffer funcs (no dirty() callback, so no deferred I/O). */
static const struct drm_framebuffer_funcs drm_fb_cma_funcs = {
	.destroy	= drm_gem_fb_destroy,
	.create_handle	= drm_gem_fb_create_handle,
};
379
/**
 * drm_fbdev_cma_init() - Allocate and initializes a drm_fbdev_cma struct
 * @dev: DRM device
 * @preferred_bpp: Preferred bits per pixel for the device
 * @max_conn_count: Maximum number of connectors
 *
 * Convenience wrapper around drm_fbdev_cma_init_with_funcs() using the
 * default framebuffer funcs.
 *
 * Returns a newly allocated drm_fbdev_cma struct or a ERR_PTR.
 */
struct drm_fbdev_cma *drm_fbdev_cma_init(struct drm_device *dev,
	unsigned int preferred_bpp, unsigned int max_conn_count)
{
	return drm_fbdev_cma_init_with_funcs(dev, preferred_bpp,
					     max_conn_count,
					     &drm_fb_cma_funcs);
}
EXPORT_SYMBOL_GPL(drm_fbdev_cma_init);
396
397 /**
398  * drm_fbdev_cma_fini() - Free drm_fbdev_cma struct
399  * @fbdev_cma: The drm_fbdev_cma struct
400  */
401 void drm_fbdev_cma_fini(struct drm_fbdev_cma *fbdev_cma)
402 {
403         drm_fb_helper_unregister_fbi(&fbdev_cma->fb_helper);
404         if (fbdev_cma->fb_helper.fbdev)
405                 drm_fbdev_cma_defio_fini(fbdev_cma->fb_helper.fbdev);
406
407         if (fbdev_cma->fb_helper.fb)
408                 drm_framebuffer_remove(fbdev_cma->fb_helper.fb);
409
410         drm_fb_helper_fini(&fbdev_cma->fb_helper);
411         kfree(fbdev_cma);
412 }
413 EXPORT_SYMBOL_GPL(drm_fbdev_cma_fini);
414
415 /**
416  * drm_fbdev_cma_restore_mode() - Restores initial framebuffer mode
417  * @fbdev_cma: The drm_fbdev_cma struct, may be NULL
418  *
419  * This function is usually called from the &drm_driver.lastclose callback.
420  */
421 void drm_fbdev_cma_restore_mode(struct drm_fbdev_cma *fbdev_cma)
422 {
423         if (fbdev_cma)
424                 drm_fb_helper_restore_fbdev_mode_unlocked(&fbdev_cma->fb_helper);
425 }
426 EXPORT_SYMBOL_GPL(drm_fbdev_cma_restore_mode);
427
/**
 * drm_fbdev_cma_hotplug_event() - Poll for hotplug events
 * @fbdev_cma: The drm_fbdev_cma struct, may be NULL
 *
 * This function is usually called from the &drm_mode_config.output_poll_changed
 * callback.
 */
void drm_fbdev_cma_hotplug_event(struct drm_fbdev_cma *fbdev_cma)
{
	if (fbdev_cma)
		drm_fb_helper_hotplug_event(&fbdev_cma->fb_helper);
}
EXPORT_SYMBOL_GPL(drm_fbdev_cma_hotplug_event);
441
442 /**
443  * drm_fbdev_cma_set_suspend - wrapper around drm_fb_helper_set_suspend
444  * @fbdev_cma: The drm_fbdev_cma struct, may be NULL
445  * @state: desired state, zero to resume, non-zero to suspend
446  *
447  * Calls drm_fb_helper_set_suspend, which is a wrapper around
448  * fb_set_suspend implemented by fbdev core.
449  */
450 void drm_fbdev_cma_set_suspend(struct drm_fbdev_cma *fbdev_cma, bool state)
451 {
452         if (fbdev_cma)
453                 drm_fb_helper_set_suspend(&fbdev_cma->fb_helper, state);
454 }
455 EXPORT_SYMBOL(drm_fbdev_cma_set_suspend);
456
/**
 * drm_fbdev_cma_set_suspend_unlocked - wrapper around
 *                                      drm_fb_helper_set_suspend_unlocked
 * @fbdev_cma: The drm_fbdev_cma struct, may be NULL
 * @state: desired state, zero to resume, non-zero to suspend
 *
 * Calls drm_fb_helper_set_suspend_unlocked, which is a wrapper around
 * fb_set_suspend implemented by fbdev core.
 */
void drm_fbdev_cma_set_suspend_unlocked(struct drm_fbdev_cma *fbdev_cma,
					bool state)
{
	if (fbdev_cma)
		drm_fb_helper_set_suspend_unlocked(&fbdev_cma->fb_helper,
						   state);
}
EXPORT_SYMBOL(drm_fbdev_cma_set_suspend_unlocked);
This page took 0.06335 seconds and 4 git commands to generate.