/* drivers/gpu/drm/drm_fbdev_generic.c */
1 // SPDX-License-Identifier: MIT
2
3 #include <linux/moduleparam.h>
4 #include <linux/vmalloc.h>
5
6 #include <drm/drm_crtc_helper.h>
7 #include <drm/drm_drv.h>
8 #include <drm/drm_fb_helper.h>
9 #include <drm/drm_framebuffer.h>
10 #include <drm/drm_print.h>
11
12 #include <drm/drm_fbdev_generic.h>
13
14 static bool drm_fbdev_use_shadow_fb(struct drm_fb_helper *fb_helper)
15 {
16         struct drm_device *dev = fb_helper->dev;
17         struct drm_framebuffer *fb = fb_helper->fb;
18
19         return dev->mode_config.prefer_shadow_fbdev ||
20                dev->mode_config.prefer_shadow ||
21                fb->funcs->dirty;
22 }
23
24 /* @user: 1=userspace, 0=fbcon */
25 static int drm_fbdev_fb_open(struct fb_info *info, int user)
26 {
27         struct drm_fb_helper *fb_helper = info->par;
28
29         /* No need to take a ref for fbcon because it unbinds on unregister */
30         if (user && !try_module_get(fb_helper->dev->driver->fops->owner))
31                 return -ENODEV;
32
33         return 0;
34 }
35
36 static int drm_fbdev_fb_release(struct fb_info *info, int user)
37 {
38         struct drm_fb_helper *fb_helper = info->par;
39
40         if (user)
41                 module_put(fb_helper->dev->driver->fops->owner);
42
43         return 0;
44 }
45
/*
 * Tear down the fbdev emulation state: stop deferred I/O, finalize the
 * fb helper, and release whichever backing store is in use — the vzalloc'ed
 * shadow buffer or the vmapped client buffer. Safe to call when setup never
 * completed (fb_helper->dev is NULL).
 */
static void drm_fbdev_cleanup(struct drm_fb_helper *fb_helper)
{
	struct fb_info *fbi = fb_helper->info;
	void *shadow = NULL;

	/* Setup failed before drm_fb_helper_init(); nothing to undo */
	if (!fb_helper->dev)
		return;

	if (fbi) {
		if (fbi->fbdefio)
			fb_deferred_io_cleanup(fbi);
		/*
		 * Save the shadow pointer now: drm_fb_helper_fini() below
		 * releases fbi, so it must be freed afterwards from the copy.
		 */
		if (drm_fbdev_use_shadow_fb(fb_helper))
			shadow = fbi->screen_buffer;
	}

	drm_fb_helper_fini(fb_helper);

	/* Shadow mode: free the vzalloc'ed copy; direct mode: undo the vmap */
	if (shadow)
		vfree(shadow);
	else if (fb_helper->buffer)
		drm_client_buffer_vunmap(fb_helper->buffer);

	drm_client_framebuffer_delete(fb_helper->buffer);
}
70
71 static void drm_fbdev_release(struct drm_fb_helper *fb_helper)
72 {
73         drm_fbdev_cleanup(fb_helper);
74         drm_client_release(&fb_helper->client);
75         kfree(fb_helper);
76 }
77
78 /*
79  * fb_ops.fb_destroy is called by the last put_fb_info() call at the end of
80  * unregister_framebuffer() or fb_release().
81  */
82 static void drm_fbdev_fb_destroy(struct fb_info *info)
83 {
84         drm_fbdev_release(info->par);
85 }
86
87 static int drm_fbdev_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
88 {
89         struct drm_fb_helper *fb_helper = info->par;
90
91         if (drm_fbdev_use_shadow_fb(fb_helper))
92                 return fb_deferred_io_mmap(info, vma);
93         else if (fb_helper->dev->driver->gem_prime_mmap)
94                 return fb_helper->dev->driver->gem_prime_mmap(fb_helper->buffer->gem, vma);
95         else
96                 return -ENODEV;
97 }
98
99 static bool drm_fbdev_use_iomem(struct fb_info *info)
100 {
101         struct drm_fb_helper *fb_helper = info->par;
102         struct drm_client_buffer *buffer = fb_helper->buffer;
103
104         return !drm_fbdev_use_shadow_fb(fb_helper) && buffer->map.is_iomem;
105 }
106
107 static ssize_t drm_fbdev_fb_read(struct fb_info *info, char __user *buf,
108                                  size_t count, loff_t *ppos)
109 {
110         ssize_t ret;
111
112         if (drm_fbdev_use_iomem(info))
113                 ret = drm_fb_helper_cfb_read(info, buf, count, ppos);
114         else
115                 ret = drm_fb_helper_sys_read(info, buf, count, ppos);
116
117         return ret;
118 }
119
120 static ssize_t drm_fbdev_fb_write(struct fb_info *info, const char __user *buf,
121                                   size_t count, loff_t *ppos)
122 {
123         ssize_t ret;
124
125         if (drm_fbdev_use_iomem(info))
126                 ret = drm_fb_helper_cfb_write(info, buf, count, ppos);
127         else
128                 ret = drm_fb_helper_sys_write(info, buf, count, ppos);
129
130         return ret;
131 }
132
/* Fill a rectangle, dispatching on whether the buffer is I/O memory. */
static void drm_fbdev_fb_fillrect(struct fb_info *info,
				  const struct fb_fillrect *rect)
{
	if (!drm_fbdev_use_iomem(info)) {
		drm_fb_helper_sys_fillrect(info, rect);
		return;
	}

	drm_fb_helper_cfb_fillrect(info, rect);
}
141
/* Copy a screen area, dispatching on whether the buffer is I/O memory. */
static void drm_fbdev_fb_copyarea(struct fb_info *info,
				  const struct fb_copyarea *area)
{
	if (!drm_fbdev_use_iomem(info)) {
		drm_fb_helper_sys_copyarea(info, area);
		return;
	}

	drm_fb_helper_cfb_copyarea(info, area);
}
150
/* Blit an image, dispatching on whether the buffer is I/O memory. */
static void drm_fbdev_fb_imageblit(struct fb_info *info,
				   const struct fb_image *image)
{
	if (!drm_fbdev_use_iomem(info)) {
		drm_fb_helper_sys_imageblit(info, image);
		return;
	}

	drm_fb_helper_cfb_imageblit(info, image);
}
159
/*
 * fbdev operations for the generic emulation. The read/write/draw entries
 * dispatch between the cfb_ (I/O memory) and sys_ (system memory) helpers
 * at runtime via drm_fbdev_use_iomem().
 */
static const struct fb_ops drm_fbdev_fb_ops = {
	.owner		= THIS_MODULE,
	DRM_FB_HELPER_DEFAULT_OPS,
	.fb_open	= drm_fbdev_fb_open,
	.fb_release	= drm_fbdev_fb_release,
	.fb_destroy	= drm_fbdev_fb_destroy,
	.fb_mmap	= drm_fbdev_fb_mmap,
	.fb_read	= drm_fbdev_fb_read,
	.fb_write	= drm_fbdev_fb_write,
	.fb_fillrect	= drm_fbdev_fb_fillrect,
	.fb_copyarea	= drm_fbdev_fb_copyarea,
	.fb_imageblit	= drm_fbdev_fb_imageblit,
};
173
/* Deferred I/O: flush accumulated damage at most every HZ/20 ticks (50 ms). */
static struct fb_deferred_io drm_fbdev_defio = {
	.delay		= HZ / 20,
	.deferred_io	= drm_fb_helper_deferred_io,
};
178
/*
 * This function uses the client API to create a framebuffer backed by a dumb buffer.
 *
 * Shadow-buffer mode allocates a vzalloc'ed copy and drives it with deferred
 * I/O; otherwise the client buffer is vmapped directly into fb_info. Partial
 * allocations on the error paths are presumably unwound by the caller via
 * drm_fbdev_cleanup() — verify against drm_fb_helper_initial_config().
 */
static int drm_fbdev_fb_probe(struct drm_fb_helper *fb_helper,
			      struct drm_fb_helper_surface_size *sizes)
{
	struct drm_client_dev *client = &fb_helper->client;
	struct drm_device *dev = fb_helper->dev;
	struct drm_client_buffer *buffer;
	struct drm_framebuffer *fb;
	struct fb_info *fbi;
	u32 format;
	struct iosys_map map;
	int ret;

	drm_dbg_kms(dev, "surface width(%d), height(%d) and bpp(%d)\n",
		    sizes->surface_width, sizes->surface_height,
		    sizes->surface_bpp);

	/* Derive a fourcc format from the legacy bpp/depth pair */
	format = drm_mode_legacy_fb_format(sizes->surface_bpp, sizes->surface_depth);
	buffer = drm_client_framebuffer_create(client, sizes->surface_width,
					       sizes->surface_height, format);
	if (IS_ERR(buffer))
		return PTR_ERR(buffer);

	fb_helper->buffer = buffer;
	fb_helper->fb = buffer->fb;
	fb = buffer->fb;

	fbi = drm_fb_helper_alloc_info(fb_helper);
	if (IS_ERR(fbi))
		return PTR_ERR(fbi);

	fbi->fbops = &drm_fbdev_fb_ops;
	/* Size of the visible screen in bytes: full pitch per scanline */
	fbi->screen_size = sizes->surface_height * fb->pitches[0];
	fbi->fix.smem_len = fbi->screen_size;
	fbi->flags = FBINFO_DEFAULT;

	drm_fb_helper_fill_info(fbi, fb_helper, sizes);

	if (drm_fbdev_use_shadow_fb(fb_helper)) {
		/* System-memory shadow; damage is flushed by deferred I/O */
		fbi->screen_buffer = vzalloc(fbi->screen_size);
		if (!fbi->screen_buffer)
			return -ENOMEM;
		fbi->flags |= FBINFO_VIRTFB | FBINFO_READS_FAST;

		fbi->fbdefio = &drm_fbdev_defio;
		fb_deferred_io_init(fbi);
	} else {
		/* buffer is mapped for HW framebuffer */
		ret = drm_client_buffer_vmap(fb_helper->buffer, &map);
		if (ret)
			return ret;
		if (map.is_iomem) {
			fbi->screen_base = map.vaddr_iomem;
		} else {
			fbi->screen_buffer = map.vaddr;
			fbi->flags |= FBINFO_VIRTFB;
		}

		/*
		 * Shamelessly leak the physical address to user-space. As
		 * page_to_phys() is undefined for I/O memory, warn in this
		 * case.
		 */
#if IS_ENABLED(CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM)
		if (fb_helper->hint_leak_smem_start && fbi->fix.smem_start == 0 &&
		    !drm_WARN_ON_ONCE(dev, map.is_iomem))
			fbi->fix.smem_start =
				page_to_phys(virt_to_page(fbi->screen_buffer));
#endif
	}

	return 0;
}
254
/*
 * Copy the clip rectangle from the shadow buffer to the client buffer.
 * @dst is advanced during the blit and must be a caller-owned copy of the
 * buffer's iosys_map.
 */
static void drm_fbdev_damage_blit_real(struct drm_fb_helper *fb_helper,
				       struct drm_clip_rect *clip,
				       struct iosys_map *dst)
{
	struct drm_framebuffer *fb = fb_helper->fb;
	size_t offset = clip->y1 * fb->pitches[0];
	size_t len = clip->x2 - clip->x1;
	unsigned int y;
	void *src;

	/*
	 * Sub-byte formats pack several pixels per byte, so the byte offset
	 * and row length must be rounded to whole bytes. For bpp >= 8 a
	 * pixel spans cpp[0] whole bytes.
	 */
	switch (drm_format_info_bpp(fb->format, 0)) {
	case 1:
		/* 8 pixels per byte */
		offset += clip->x1 / 8;
		len = DIV_ROUND_UP(len + clip->x1 % 8, 8);
		break;
	case 2:
		/* 4 pixels per byte */
		offset += clip->x1 / 4;
		len = DIV_ROUND_UP(len + clip->x1 % 4, 4);
		break;
	case 4:
		/* 2 pixels per byte */
		offset += clip->x1 / 2;
		len = DIV_ROUND_UP(len + clip->x1 % 2, 2);
		break;
	default:
		offset += clip->x1 * fb->format->cpp[0];
		len *= fb->format->cpp[0];
		break;
	}

	src = fb_helper->info->screen_buffer + offset;
	iosys_map_incr(dst, offset); /* go to first pixel within clip rect */

	/* Copy one clipped scanline per iteration, stepping by the fb pitch */
	for (y = clip->y1; y < clip->y2; y++) {
		iosys_map_memcpy_to(dst, 0, src, len);
		iosys_map_incr(dst, fb->pitches[0]);
		src += fb->pitches[0];
	}
}
293
/*
 * Flush @clip from the shadow buffer into the client buffer's memory.
 * Returns 0 on success or a negative errno from the vmap.
 */
static int drm_fbdev_damage_blit(struct drm_fb_helper *fb_helper,
				 struct drm_clip_rect *clip)
{
	struct drm_client_buffer *buffer = fb_helper->buffer;
	struct iosys_map map, dst;
	int ret;

	/*
	 * We have to pin the client buffer to its current location while
	 * flushing the shadow buffer. In the general case, concurrent
	 * modesetting operations could try to move the buffer and would
	 * fail. The modeset has to be serialized by acquiring the reservation
	 * object of the underlying BO here.
	 *
	 * For fbdev emulation, we only have to protect against fbdev modeset
	 * operations. Nothing else will involve the client buffer's BO. So it
	 * is sufficient to acquire struct drm_fb_helper.lock here.
	 */
	mutex_lock(&fb_helper->lock);

	ret = drm_client_buffer_vmap(buffer, &map);
	if (ret)
		goto out;

	/* Blit with a copy of the map so the original stays at the origin */
	dst = map;
	drm_fbdev_damage_blit_real(fb_helper, clip, &dst);

	drm_client_buffer_vunmap(buffer);

out:
	mutex_unlock(&fb_helper->lock);

	return ret;
}
328
329 static int drm_fbdev_fb_dirty(struct drm_fb_helper *helper, struct drm_clip_rect *clip)
330 {
331         struct drm_device *dev = helper->dev;
332         int ret;
333
334         if (!drm_fbdev_use_shadow_fb(helper))
335                 return 0;
336
337         /* Call damage handlers only if necessary */
338         if (!(clip->x1 < clip->x2 && clip->y1 < clip->y2))
339                 return 0;
340
341         if (helper->buffer) {
342                 ret = drm_fbdev_damage_blit(helper, clip);
343                 if (drm_WARN_ONCE(dev, ret, "Damage blitter failed: ret=%d\n", ret))
344                         return ret;
345         }
346
347         if (helper->fb->funcs->dirty) {
348                 ret = helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, clip, 1);
349                 if (drm_WARN_ONCE(dev, ret, "Dirty helper failed: ret=%d\n", ret))
350                         return ret;
351         }
352
353         return 0;
354 }
355
/* fb helper callbacks for the generic emulation */
static const struct drm_fb_helper_funcs drm_fb_helper_generic_funcs = {
	.fb_probe = drm_fbdev_fb_probe,
	.fb_dirty = drm_fbdev_fb_dirty,
};
360
361 static void drm_fbdev_client_unregister(struct drm_client_dev *client)
362 {
363         struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
364
365         if (fb_helper->info)
366                 /* drm_fbdev_fb_destroy() takes care of cleanup */
367                 drm_fb_helper_unregister_info(fb_helper);
368         else
369                 drm_fbdev_release(fb_helper);
370 }
371
372 static int drm_fbdev_client_restore(struct drm_client_dev *client)
373 {
374         drm_fb_helper_lastclose(client->dev);
375
376         return 0;
377 }
378
/*
 * Hotplug handler: on the first successful invocation this performs the full
 * fbdev setup; afterwards it only forwards the hotplug event. On failure the
 * helper is reset so setup is not retried.
 */
static int drm_fbdev_client_hotplug(struct drm_client_dev *client)
{
	struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
	struct drm_device *dev = client->dev;
	int ret;

	/* Setup is not retried if it has failed */
	if (!fb_helper->dev && fb_helper->funcs)
		return 0;

	/* Already set up: just propagate the event */
	if (dev->fb_helper)
		return drm_fb_helper_hotplug_event(dev->fb_helper);

	if (!dev->mode_config.num_connector) {
		drm_dbg_kms(dev, "No connectors found, will not create framebuffer!\n");
		return 0;
	}

	drm_fb_helper_prepare(dev, fb_helper, &drm_fb_helper_generic_funcs);

	ret = drm_fb_helper_init(dev, fb_helper);
	if (ret)
		goto err;

	if (!drm_drv_uses_atomic_modeset(dev))
		drm_helper_disable_unused_functions(dev);

	ret = drm_fb_helper_initial_config(fb_helper, fb_helper->preferred_bpp);
	if (ret)
		goto err_cleanup;

	return 0;

err_cleanup:
	drm_fbdev_cleanup(fb_helper);
err:
	/* Clearing dev (with funcs set) marks setup as permanently failed */
	fb_helper->dev = NULL;
	fb_helper->info = NULL;

	drm_err(dev, "fbdev: Failed to setup generic emulation (ret=%d)\n", ret);

	return ret;
}
422
/* DRM client hooks tying fbdev emulation to device lifecycle events */
static const struct drm_client_funcs drm_fbdev_client_funcs = {
	.owner		= THIS_MODULE,
	.unregister	= drm_fbdev_client_unregister,
	.restore	= drm_fbdev_client_restore,
	.hotplug	= drm_fbdev_client_hotplug,
};
429
430 /**
431  * drm_fbdev_generic_setup() - Setup generic fbdev emulation
432  * @dev: DRM device
433  * @preferred_bpp: Preferred bits per pixel for the device.
434  *                 @dev->mode_config.preferred_depth is used if this is zero.
435  *
436  * This function sets up generic fbdev emulation for drivers that supports
437  * dumb buffers with a virtual address and that can be mmap'ed.
438  * drm_fbdev_generic_setup() shall be called after the DRM driver registered
439  * the new DRM device with drm_dev_register().
440  *
441  * Restore, hotplug events and teardown are all taken care of. Drivers that do
442  * suspend/resume need to call drm_fb_helper_set_suspend_unlocked() themselves.
443  * Simple drivers might use drm_mode_config_helper_suspend().
444  *
445  * Drivers that set the dirty callback on their framebuffer will get a shadow
446  * fbdev buffer that is blitted onto the real buffer. This is done in order to
447  * make deferred I/O work with all kinds of buffers. A shadow buffer can be
448  * requested explicitly by setting struct drm_mode_config.prefer_shadow or
449  * struct drm_mode_config.prefer_shadow_fbdev to true beforehand. This is
450  * required to use generic fbdev emulation with SHMEM helpers.
451  *
452  * This function is safe to call even when there are no connectors present.
453  * Setup will be retried on the next hotplug event.
454  *
455  * The fbdev is destroyed by drm_dev_unregister().
456  */
457 void drm_fbdev_generic_setup(struct drm_device *dev,
458                              unsigned int preferred_bpp)
459 {
460         struct drm_fb_helper *fb_helper;
461         int ret;
462
463         drm_WARN(dev, !dev->registered, "Device has not been registered.\n");
464         drm_WARN(dev, dev->fb_helper, "fb_helper is already set!\n");
465
466         fb_helper = kzalloc(sizeof(*fb_helper), GFP_KERNEL);
467         if (!fb_helper)
468                 return;
469
470         ret = drm_client_init(dev, &fb_helper->client, "fbdev", &drm_fbdev_client_funcs);
471         if (ret) {
472                 kfree(fb_helper);
473                 drm_err(dev, "Failed to register client: %d\n", ret);
474                 return;
475         }
476
477         /*
478          * FIXME: This mixes up depth with bpp, which results in a glorious
479          * mess, resulting in some drivers picking wrong fbdev defaults and
480          * others wrong preferred_depth defaults.
481          */
482         if (!preferred_bpp)
483                 preferred_bpp = dev->mode_config.preferred_depth;
484         if (!preferred_bpp)
485                 preferred_bpp = 32;
486         fb_helper->preferred_bpp = preferred_bpp;
487
488         ret = drm_fbdev_client_hotplug(&fb_helper->client);
489         if (ret)
490                 drm_dbg_kms(dev, "client hotplug ret=%d\n", ret);
491
492         drm_client_register(&fb_helper->client);
493 }
494 EXPORT_SYMBOL(drm_fbdev_generic_setup);