linux.git: drivers/gpu/drm/i915/display/intel_fbdev.c
/*
 * Copyright © 2007 David Airlie
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *     David Airlie
 */

#include <linux/async.h>
#include <linux/console.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/sysrq.h>
#include <linux/tty.h>
#include <linux/vga_switcheroo.h>

#include <drm/drm_crtc.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>

#include "gem/i915_gem_lmem.h"

#include "i915_drv.h"
#include "intel_display_types.h"
#include "intel_fb.h"
#include "intel_fb_pin.h"
#include "intel_fbdev.h"
#include "intel_frontbuffer.h"

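/*
 * Per-device fbdev emulation state: the generic DRM fb helper, the
 * framebuffer and pinned vma backing the fbcon console, and bookkeeping
 * for deferring hotplug (HPD) handling while fbdev is suspended.
 */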
struct intel_fbdev {
        struct drm_fb_helper helper;
        struct intel_framebuffer *fb;
        struct i915_vma *vma;
        unsigned long vma_flags;
        async_cookie_t cookie;
        int preferred_bpp;

        /* Whether or not fbdev hpd processing is temporarily suspended */
        bool hpd_suspended: 1;
        /* Set when a hotplug was received while HPD processing was suspended */
        bool hpd_waiting: 1;

        /* Protects hpd_suspended */
        struct mutex hpd_lock;
};

static struct intel_frontbuffer *to_frontbuffer(struct intel_fbdev *ifbdev)
{
        return ifbdev->fb->frontbuffer;
}

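/*
 * Frontbuffer invalidation marks the fbdev framebuffer as dirtied by CPU
 * writes, so that display features tracking frontbuffer activity (FBC, PSR
 * and friends) stay in the loop; the fb_ops wrappers below call it after
 * the corresponding fb helper operation succeeds.
 */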
static void intel_fbdev_invalidate(struct intel_fbdev *ifbdev)
{
        intel_frontbuffer_invalidate(to_frontbuffer(ifbdev), ORIGIN_CPU);
}

static int intel_fbdev_set_par(struct fb_info *info)
{
        struct drm_fb_helper *fb_helper = info->par;
        struct intel_fbdev *ifbdev =
                container_of(fb_helper, struct intel_fbdev, helper);
        int ret;

        ret = drm_fb_helper_set_par(info);
        if (ret == 0)
                intel_fbdev_invalidate(ifbdev);

        return ret;
}

static int intel_fbdev_blank(int blank, struct fb_info *info)
{
        struct drm_fb_helper *fb_helper = info->par;
        struct intel_fbdev *ifbdev =
                container_of(fb_helper, struct intel_fbdev, helper);
        int ret;

        ret = drm_fb_helper_blank(blank, info);
        if (ret == 0)
                intel_fbdev_invalidate(ifbdev);

        return ret;
}

static int intel_fbdev_pan_display(struct fb_var_screeninfo *var,
                                   struct fb_info *info)
{
        struct drm_fb_helper *fb_helper = info->par;
        struct intel_fbdev *ifbdev =
                container_of(fb_helper, struct intel_fbdev, helper);
        int ret;

        ret = drm_fb_helper_pan_display(var, info);
        if (ret == 0)
                intel_fbdev_invalidate(ifbdev);

        return ret;
}

static const struct fb_ops intelfb_ops = {
        .owner = THIS_MODULE,
        DRM_FB_HELPER_DEFAULT_OPS,
        .fb_set_par = intel_fbdev_set_par,
        .fb_fillrect = drm_fb_helper_cfb_fillrect,
        .fb_copyarea = drm_fb_helper_cfb_copyarea,
        .fb_imageblit = drm_fb_helper_cfb_imageblit,
        .fb_pan_display = intel_fbdev_pan_display,
        .fb_blank = intel_fbdev_blank,
};

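/*
 * Allocate the backing object for the fbdev framebuffer: local memory on
 * devices that have it, otherwise stolen memory when the framebuffer is
 * small enough, with shmem as the final fallback.
 */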
static int intelfb_alloc(struct drm_fb_helper *helper,
                         struct drm_fb_helper_surface_size *sizes)
{
        struct intel_fbdev *ifbdev =
                container_of(helper, struct intel_fbdev, helper);
        struct drm_framebuffer *fb;
        struct drm_device *dev = helper->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_mode_fb_cmd2 mode_cmd = {};
        struct drm_i915_gem_object *obj;
        int size;

        /* we don't do packed 24bpp */
        if (sizes->surface_bpp == 24)
                sizes->surface_bpp = 32;

        mode_cmd.width = sizes->surface_width;
        mode_cmd.height = sizes->surface_height;

        mode_cmd.pitches[0] = ALIGN(mode_cmd.width *
                                    DIV_ROUND_UP(sizes->surface_bpp, 8), 64);
        mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
                                                          sizes->surface_depth);

        size = mode_cmd.pitches[0] * mode_cmd.height;
        size = PAGE_ALIGN(size);

        obj = ERR_PTR(-ENODEV);
        if (HAS_LMEM(dev_priv)) {
                obj = i915_gem_object_create_lmem(dev_priv, size,
                                                  I915_BO_ALLOC_CONTIGUOUS);
        } else {
                /*
                 * If the FB is too big, just don't use it since fbdev is not very
                 * important and we should probably use that space with FBC or other
                 * features.
                 */
                if (size * 2 < dev_priv->stolen_usable_size)
                        obj = i915_gem_object_create_stolen(dev_priv, size);
                if (IS_ERR(obj))
                        obj = i915_gem_object_create_shmem(dev_priv, size);
        }

        if (IS_ERR(obj)) {
                drm_err(&dev_priv->drm, "failed to allocate framebuffer (%pe)\n", obj);
                return PTR_ERR(obj);
        }

        fb = intel_framebuffer_create(obj, &mode_cmd);
        i915_gem_object_put(obj);
        if (IS_ERR(fb))
                return PTR_ERR(fb);

        ifbdev->fb = to_intel_framebuffer(fb);
        return 0;
}

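/*
 * fb_probe hook: reuse the BIOS-provided framebuffer when it is big enough
 * for the requested configuration, otherwise allocate a fresh one, then pin
 * it for CPU access and fill in the fb_info used by fbcon.
 */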
static int intelfb_create(struct drm_fb_helper *helper,
                          struct drm_fb_helper_surface_size *sizes)
{
        struct intel_fbdev *ifbdev =
                container_of(helper, struct intel_fbdev, helper);
        struct intel_framebuffer *intel_fb = ifbdev->fb;
        struct drm_device *dev = helper->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
        struct i915_ggtt *ggtt = to_gt(dev_priv)->ggtt;
        const struct i915_gtt_view view = {
                .type = I915_GTT_VIEW_NORMAL,
        };
        intel_wakeref_t wakeref;
        struct fb_info *info;
        struct i915_vma *vma;
        unsigned long flags = 0;
        bool prealloc = false;
        void __iomem *vaddr;
        struct drm_i915_gem_object *obj;
        int ret;

        mutex_lock(&ifbdev->hpd_lock);
        ret = ifbdev->hpd_suspended ? -EAGAIN : 0;
        mutex_unlock(&ifbdev->hpd_lock);
        if (ret)
                return ret;

        if (intel_fb &&
            (sizes->fb_width > intel_fb->base.width ||
             sizes->fb_height > intel_fb->base.height)) {
                drm_dbg_kms(&dev_priv->drm,
                            "BIOS fb too small (%dx%d), we require (%dx%d),"
                            " releasing it\n",
                            intel_fb->base.width, intel_fb->base.height,
                            sizes->fb_width, sizes->fb_height);
                drm_framebuffer_put(&intel_fb->base);
                intel_fb = ifbdev->fb = NULL;
        }
        if (!intel_fb || drm_WARN_ON(dev, !intel_fb_obj(&intel_fb->base))) {
                drm_dbg_kms(&dev_priv->drm,
                            "no BIOS fb, allocating a new one\n");
                ret = intelfb_alloc(helper, sizes);
                if (ret)
                        return ret;
                intel_fb = ifbdev->fb;
        } else {
                drm_dbg_kms(&dev_priv->drm, "re-using BIOS fb\n");
                prealloc = true;
                sizes->fb_width = intel_fb->base.width;
                sizes->fb_height = intel_fb->base.height;
        }

        wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

        /* Pin the GGTT vma for our access via info->screen_base.
         * This also validates that any existing fb inherited from the
         * BIOS is suitable for our own access.
         */
        vma = intel_pin_and_fence_fb_obj(&ifbdev->fb->base, false,
                                         &view, false, &flags);
        if (IS_ERR(vma)) {
                ret = PTR_ERR(vma);
                goto out_unlock;
        }

        info = drm_fb_helper_alloc_fbi(helper);
        if (IS_ERR(info)) {
                drm_err(&dev_priv->drm, "Failed to allocate fb_info (%pe)\n", info);
                ret = PTR_ERR(info);
                goto out_unpin;
        }

        ifbdev->helper.fb = &ifbdev->fb->base;

        info->fbops = &intelfb_ops;

        /* setup aperture base/size for vesafb takeover */
        obj = intel_fb_obj(&intel_fb->base);
        if (i915_gem_object_is_lmem(obj)) {
                struct intel_memory_region *mem = obj->mm.region;

                info->apertures->ranges[0].base = mem->io_start;
                info->apertures->ranges[0].size = mem->io_size;

                /* Use fbdev's framebuffer from lmem for discrete */
                info->fix.smem_start =
                        (unsigned long)(mem->io_start +
                                        i915_gem_object_get_dma_address(obj, 0));
                info->fix.smem_len = obj->base.size;
        } else {
                info->apertures->ranges[0].base = ggtt->gmadr.start;
                info->apertures->ranges[0].size = ggtt->mappable_end;

                /* Our framebuffer is the entirety of fbdev's system memory */
                info->fix.smem_start =
                        (unsigned long)(ggtt->gmadr.start + vma->node.start);
                info->fix.smem_len = vma->size;
        }

        vaddr = i915_vma_pin_iomap(vma);
        if (IS_ERR(vaddr)) {
                drm_err(&dev_priv->drm,
                        "Failed to remap framebuffer into virtual memory (%pe)\n", vaddr);
                ret = PTR_ERR(vaddr);
                goto out_unpin;
        }
        info->screen_base = vaddr;
        info->screen_size = vma->size;

        drm_fb_helper_fill_info(info, &ifbdev->helper, sizes);

        /* If the object is shmemfs backed, it will have given us zeroed pages.
         * If the object is stolen however, it will be full of whatever
         * garbage was left in there.
         */
        if (!i915_gem_object_is_shmem(vma->obj) && !prealloc)
                memset_io(info->screen_base, 0, info->screen_size);

        /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */

        drm_dbg_kms(&dev_priv->drm, "allocated %dx%d fb: 0x%08x\n",
                    ifbdev->fb->base.width, ifbdev->fb->base.height,
                    i915_ggtt_offset(vma));
        ifbdev->vma = vma;
        ifbdev->vma_flags = flags;

        intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
        vga_switcheroo_client_fb_set(pdev, info);
        return 0;

out_unpin:
        intel_unpin_fb_vma(vma, flags);
out_unlock:
        intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
        return ret;
}

static const struct drm_fb_helper_funcs intel_fb_helper_funcs = {
        .fb_probe = intelfb_create,
};

static void intel_fbdev_destroy(struct intel_fbdev *ifbdev)
{
        /* We rely on the object-free to release the VMA pinning for
         * the info->screen_base mapping. Leaking the VMA is simpler than
         * trying to rectify all the possible error paths leading here.
         */

        drm_fb_helper_fini(&ifbdev->helper);

        if (ifbdev->vma)
                intel_unpin_fb_vma(ifbdev->vma, ifbdev->vma_flags);

        if (ifbdev->fb)
                drm_framebuffer_remove(&ifbdev->fb->base);

        kfree(ifbdev);
}

/*
 * Build an intel_fbdev struct using a BIOS allocated framebuffer, if possible.
 * The core display code will have read out the current plane configuration,
 * so we use that to figure out if there's an object for us to use as the
 * fb, and if so, we re-use it for the fbdev configuration.
 *
 * Note we only support a single fb shared across pipes for boot (mostly for
 * fbcon), so we just find the biggest and use that.
 */
static bool intel_fbdev_init_bios(struct drm_device *dev,
                                  struct intel_fbdev *ifbdev)
{
        struct drm_i915_private *i915 = to_i915(dev);
        struct intel_framebuffer *fb = NULL;
        struct intel_crtc *crtc;
        unsigned int max_size = 0;

        /* Find the largest fb */
        for_each_intel_crtc(dev, crtc) {
                struct intel_crtc_state *crtc_state =
                        to_intel_crtc_state(crtc->base.state);
                struct intel_plane *plane =
                        to_intel_plane(crtc->base.primary);
                struct intel_plane_state *plane_state =
                        to_intel_plane_state(plane->base.state);
                struct drm_i915_gem_object *obj =
                        intel_fb_obj(plane_state->uapi.fb);

                if (!crtc_state->uapi.active) {
                        drm_dbg_kms(&i915->drm,
                                    "[CRTC:%d:%s] not active, skipping\n",
                                    crtc->base.base.id, crtc->base.name);
                        continue;
                }

                if (!obj) {
                        drm_dbg_kms(&i915->drm,
                                    "[PLANE:%d:%s] no fb, skipping\n",
                                    plane->base.base.id, plane->base.name);
                        continue;
                }

                if (obj->base.size > max_size) {
                        drm_dbg_kms(&i915->drm,
                                    "found possible fb from [PLANE:%d:%s]\n",
                                    plane->base.base.id, plane->base.name);
                        fb = to_intel_framebuffer(plane_state->uapi.fb);
                        max_size = obj->base.size;
                }
        }

        if (!fb) {
                drm_dbg_kms(&i915->drm,
                            "no active fbs found, not using BIOS config\n");
                goto out;
        }

        /* Now make sure all the pipes will fit into it */
        for_each_intel_crtc(dev, crtc) {
                struct intel_crtc_state *crtc_state =
                        to_intel_crtc_state(crtc->base.state);
                struct intel_plane *plane =
                        to_intel_plane(crtc->base.primary);
                unsigned int cur_size;

                if (!crtc_state->uapi.active) {
                        drm_dbg_kms(&i915->drm,
                                    "[CRTC:%d:%s] not active, skipping\n",
                                    crtc->base.base.id, crtc->base.name);
                        continue;
                }

                drm_dbg_kms(&i915->drm, "checking [PLANE:%d:%s] for BIOS fb\n",
                            plane->base.base.id, plane->base.name);

                /*
                 * See if the plane fb we found above will fit on this
                 * pipe.  Note we need to use the selected fb's pitch and bpp
                 * rather than the current pipe's, since they differ.
                 */
                cur_size = crtc_state->uapi.adjusted_mode.crtc_hdisplay;
                cur_size = cur_size * fb->base.format->cpp[0];
                if (fb->base.pitches[0] < cur_size) {
                        drm_dbg_kms(&i915->drm,
                                    "fb not wide enough for [PLANE:%d:%s] (%d vs %d)\n",
                                    plane->base.base.id, plane->base.name,
                                    cur_size, fb->base.pitches[0]);
                        fb = NULL;
                        break;
                }

                cur_size = crtc_state->uapi.adjusted_mode.crtc_vdisplay;
                cur_size = intel_fb_align_height(&fb->base, 0, cur_size);
                cur_size *= fb->base.pitches[0];
                drm_dbg_kms(&i915->drm,
                            "[CRTC:%d:%s] area: %dx%d, bpp: %d, size: %d\n",
                            crtc->base.base.id, crtc->base.name,
                            crtc_state->uapi.adjusted_mode.crtc_hdisplay,
                            crtc_state->uapi.adjusted_mode.crtc_vdisplay,
                            fb->base.format->cpp[0] * 8,
                            cur_size);

                if (cur_size > max_size) {
                        drm_dbg_kms(&i915->drm,
                                    "fb not big enough for [PLANE:%d:%s] (%d vs %d)\n",
                                    plane->base.base.id, plane->base.name,
                                    cur_size, max_size);
                        fb = NULL;
                        break;
                }

                drm_dbg_kms(&i915->drm,
                            "fb big enough [PLANE:%d:%s] (%d >= %d)\n",
                            plane->base.base.id, plane->base.name,
                            max_size, cur_size);
        }

        if (!fb) {
                drm_dbg_kms(&i915->drm,
                            "BIOS fb not suitable for all pipes, not using\n");
                goto out;
        }

        ifbdev->preferred_bpp = fb->base.format->cpp[0] * 8;
        ifbdev->fb = fb;

        drm_framebuffer_get(&ifbdev->fb->base);

        /* Final pass to check if any active pipes don't have fbs */
        for_each_intel_crtc(dev, crtc) {
                struct intel_crtc_state *crtc_state =
                        to_intel_crtc_state(crtc->base.state);
                struct intel_plane *plane =
                        to_intel_plane(crtc->base.primary);
                struct intel_plane_state *plane_state =
                        to_intel_plane_state(plane->base.state);

                if (!crtc_state->uapi.active)
                        continue;

                drm_WARN(dev, !plane_state->uapi.fb,
                         "re-used BIOS config but lost an fb on [PLANE:%d:%s]\n",
                         plane->base.base.id, plane->base.name);
        }

        drm_dbg_kms(&i915->drm, "using BIOS fb for initial console\n");
        return true;

out:
        return false;
}

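/*
 * Turning the console back on is deferred to this worker so that resume
 * does not have to block on console_lock(); see intel_fbdev_set_suspend().
 */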
static void intel_fbdev_suspend_worker(struct work_struct *work)
{
        intel_fbdev_set_suspend(&container_of(work,
                                              struct drm_i915_private,
                                              display.fbdev.suspend_work)->drm,
                                FBINFO_STATE_RUNNING,
                                true);
}

int intel_fbdev_init(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_fbdev *ifbdev;
        int ret;

        if (drm_WARN_ON(dev, !HAS_DISPLAY(dev_priv)))
                return -ENODEV;

        ifbdev = kzalloc(sizeof(struct intel_fbdev), GFP_KERNEL);
        if (ifbdev == NULL)
                return -ENOMEM;

        mutex_init(&ifbdev->hpd_lock);
        drm_fb_helper_prepare(dev, &ifbdev->helper, &intel_fb_helper_funcs);

        if (!intel_fbdev_init_bios(dev, ifbdev))
                ifbdev->preferred_bpp = 32;

        ret = drm_fb_helper_init(dev, &ifbdev->helper);
        if (ret) {
                kfree(ifbdev);
                return ret;
        }

        dev_priv->display.fbdev.fbdev = ifbdev;
        INIT_WORK(&dev_priv->display.fbdev.suspend_work, intel_fbdev_suspend_worker);

        return 0;
}

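/*
 * Initial fbdev setup runs asynchronously so it does not stall driver load.
 * Roughly, the expected lifecycle from the driver's point of view looks
 * like this (an illustrative sketch, not copied from the i915 init code):
 *
 *   intel_fbdev_init(dev);                  // allocate state, maybe adopt the BIOS fb
 *   intel_fbdev_initial_config_async(dev);  // schedule fbcon setup
 *   ...
 *   intel_fbdev_unregister(i915);           // unload: suspend fbdev, unregister fb_info
 *   intel_fbdev_fini(i915);                 // free the remaining fbdev state
 */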
static void intel_fbdev_initial_config(void *data, async_cookie_t cookie)
{
        struct intel_fbdev *ifbdev = data;

        /* Due to peculiar init order wrt hpd handling this is separate. */
        if (drm_fb_helper_initial_config(&ifbdev->helper,
                                         ifbdev->preferred_bpp))
                intel_fbdev_unregister(to_i915(ifbdev->helper.dev));
}

void intel_fbdev_initial_config_async(struct drm_device *dev)
{
        struct intel_fbdev *ifbdev = to_i915(dev)->display.fbdev.fbdev;

        if (!ifbdev)
                return;

        ifbdev->cookie = async_schedule(intel_fbdev_initial_config, ifbdev);
}

static void intel_fbdev_sync(struct intel_fbdev *ifbdev)
{
        if (!ifbdev->cookie)
                return;

        /* Only serialises with all preceding async calls, hence +1 */
        async_synchronize_cookie(ifbdev->cookie + 1);
        ifbdev->cookie = 0;
}

void intel_fbdev_unregister(struct drm_i915_private *dev_priv)
{
        struct intel_fbdev *ifbdev = dev_priv->display.fbdev.fbdev;

        if (!ifbdev)
                return;

        intel_fbdev_set_suspend(&dev_priv->drm, FBINFO_STATE_SUSPENDED, true);

        if (!current_is_async())
                intel_fbdev_sync(ifbdev);

        drm_fb_helper_unregister_fbi(&ifbdev->helper);
}

void intel_fbdev_fini(struct drm_i915_private *dev_priv)
{
        struct intel_fbdev *ifbdev = fetch_and_zero(&dev_priv->display.fbdev.fbdev);

        if (!ifbdev)
                return;

        intel_fbdev_destroy(ifbdev);
}

/* Suspends/resumes fbdev processing of incoming HPD events. When resuming HPD
 * processing, fbdev will perform a full connector reprobe if a hotplug event
 * was received while HPD was suspended.
 */
static void intel_fbdev_hpd_set_suspend(struct drm_i915_private *i915, int state)
{
        struct intel_fbdev *ifbdev = i915->display.fbdev.fbdev;
        bool send_hpd = false;

        mutex_lock(&ifbdev->hpd_lock);
        ifbdev->hpd_suspended = state == FBINFO_STATE_SUSPENDED;
        send_hpd = !ifbdev->hpd_suspended && ifbdev->hpd_waiting;
        ifbdev->hpd_waiting = false;
        mutex_unlock(&ifbdev->hpd_lock);

        if (send_hpd) {
                drm_dbg_kms(&i915->drm, "Handling delayed fbcon HPD event\n");
                drm_fb_helper_hotplug_event(&ifbdev->helper);
        }
}

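/*
 * state is FBINFO_STATE_RUNNING or FBINFO_STATE_SUSPENDED. The synchronous
 * path is used around suspend and driver unload; on resume the console
 * update may instead be punted to the suspend worker to avoid waiting for
 * console_lock() in the resume path.
 */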
void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_fbdev *ifbdev = dev_priv->display.fbdev.fbdev;
        struct fb_info *info;

        if (!ifbdev || !ifbdev->vma)
                goto set_suspend;

        info = ifbdev->helper.fbdev;

        if (synchronous) {
                /* Flush any pending work to turn the console on, and then
                 * wait to turn it off. It must be synchronous as we are
                 * about to suspend or unload the driver.
                 *
                 * Note that from within the work-handler, we cannot flush
                 * ourselves, so only flush outstanding work upon suspend!
                 */
                if (state != FBINFO_STATE_RUNNING)
                        flush_work(&dev_priv->display.fbdev.suspend_work);

                console_lock();
        } else {
                /*
                 * The console lock can be pretty contended on resume due
                 * to all the printk activity.  Try to keep it out of the hot
                 * path of resume if possible.
                 */
                drm_WARN_ON(dev, state != FBINFO_STATE_RUNNING);
                if (!console_trylock()) {
                        /* Don't block our own workqueue as this can
                         * be run in parallel with other i915.ko tasks.
                         */
                        schedule_work(&dev_priv->display.fbdev.suspend_work);
                        return;
                }
        }

        /* On resume from hibernation: If the object is shmemfs backed, it has
         * been restored from swap. If the object is stolen however, it will be
         * full of whatever garbage was left in there.
         */
        if (state == FBINFO_STATE_RUNNING &&
            !i915_gem_object_is_shmem(intel_fb_obj(&ifbdev->fb->base)))
                memset_io(info->screen_base, 0, info->screen_size);

        drm_fb_helper_set_suspend(&ifbdev->helper, state);
        console_unlock();

set_suspend:
        intel_fbdev_hpd_set_suspend(dev_priv, state);
}

void intel_fbdev_output_poll_changed(struct drm_device *dev)
{
        struct intel_fbdev *ifbdev = to_i915(dev)->display.fbdev.fbdev;
        bool send_hpd;

        if (!ifbdev)
                return;

        intel_fbdev_sync(ifbdev);

        mutex_lock(&ifbdev->hpd_lock);
        send_hpd = !ifbdev->hpd_suspended;
        ifbdev->hpd_waiting = true;
        mutex_unlock(&ifbdev->hpd_lock);

        if (send_hpd && (ifbdev->vma || ifbdev->helper.deferred_setup))
                drm_fb_helper_hotplug_event(&ifbdev->helper);
}

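/*
 * Restores the fbdev mode on the display, typically when the last DRM
 * master goes away (e.g. from the driver's lastclose path); the exact call
 * sites live outside this file, so treat that as context rather than a
 * guarantee.
 */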
void intel_fbdev_restore_mode(struct drm_device *dev)
{
        struct intel_fbdev *ifbdev = to_i915(dev)->display.fbdev.fbdev;

        if (!ifbdev)
                return;

        intel_fbdev_sync(ifbdev);
        if (!ifbdev->vma)
                return;

        if (drm_fb_helper_restore_fbdev_mode_unlocked(&ifbdev->helper) == 0)
                intel_fbdev_invalidate(ifbdev);
}

struct intel_framebuffer *intel_fbdev_framebuffer(struct intel_fbdev *fbdev)
{
        if (!fbdev || !fbdev->helper.fb)
                return NULL;

        return to_intel_framebuffer(fbdev->helper.fb);
}