/**************************************************************************
 *
 * Copyright © 2007 David Airlie
 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <linux/export.h>

#include <drm/drmP.h>
#include "vmwgfx_drv.h"
#include "vmwgfx_kms.h"

#include <drm/ttm/ttm_placement.h>

#define VMW_DIRTY_DELAY (HZ / 30)

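/*
 * struct vmw_fb_par - per-device state for the vmwgfx fbdev emulation.
 *
 * Holds the vmalloc shadow buffer that fbdev clients draw into, the
 * kmapped buffer object backing the KMS framebuffer, the dirty-region
 * bookkeeping shared between the deferred-I/O path and the drawing ops,
 * and the crtc/connector the fbdev console is bound to.
 */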
struct vmw_fb_par {
        struct vmw_private *vmw_priv;

        void *vmalloc;

        struct mutex bo_mutex;
        struct vmw_dma_buffer *vmw_bo;
        struct ttm_bo_kmap_obj map;
        void *bo_ptr;
        unsigned bo_size;
        struct drm_framebuffer *set_fb;
        struct drm_display_mode *set_mode;
        u32 fb_x;
        u32 fb_y;
        bool bo_iowrite;

        u32 pseudo_palette[17];

        unsigned max_width;
        unsigned max_height;

        struct {
                spinlock_t lock;
                bool active;
                unsigned x1;
                unsigned y1;
                unsigned x2;
                unsigned y2;
        } dirty;

        struct drm_crtc *crtc;
        struct drm_connector *con;
        struct delayed_work local_work;
};

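/*
 * vmw_fb_setcolreg - fb_ops::fb_setcolreg; store one truecolor palette
 * entry. Only the 16 pseudo-palette entries used by the console are
 * accepted, and only for 24- and 32-bit truecolor framebuffers.
 */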
static int vmw_fb_setcolreg(unsigned regno, unsigned red, unsigned green,
                            unsigned blue, unsigned transp,
                            struct fb_info *info)
{
        struct vmw_fb_par *par = info->par;
        u32 *pal = par->pseudo_palette;

        if (regno > 15) {
                DRM_ERROR("Bad regno %u.\n", regno);
                return 1;
        }

        switch (par->set_fb->format->depth) {
        case 24:
        case 32:
                pal[regno] = ((red & 0xff00) << 8) |
                              (green & 0xff00) |
                             ((blue  & 0xff00) >> 8);
                break;
        default:
                DRM_ERROR("Bad depth %u, bpp %u.\n",
                          par->set_fb->format->depth,
                          par->set_fb->format->cpp[0] * 8);
                return 1;
        }

        return 0;
}

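/*
 * vmw_fb_check_var - fb_ops::fb_check_var; only 32 bpp truecolor (depth
 * 24 or 32, depending on whether an alpha channel was requested) is
 * supported, and the requested geometry must fit both the fbdev
 * framebuffer and the device's VRAM.
 */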
static int vmw_fb_check_var(struct fb_var_screeninfo *var,
                            struct fb_info *info)
{
        int depth = var->bits_per_pixel;
        struct vmw_fb_par *par = info->par;
        struct vmw_private *vmw_priv = par->vmw_priv;

        switch (var->bits_per_pixel) {
        case 32:
                depth = (var->transp.length > 0) ? 32 : 24;
                break;
        default:
                DRM_ERROR("Bad bpp %u.\n", var->bits_per_pixel);
                return -EINVAL;
        }

        switch (depth) {
        case 24:
                var->red.offset = 16;
                var->green.offset = 8;
                var->blue.offset = 0;
                var->red.length = 8;
                var->green.length = 8;
                var->blue.length = 8;
                var->transp.length = 0;
                var->transp.offset = 0;
                break;
        case 32:
                var->red.offset = 16;
                var->green.offset = 8;
                var->blue.offset = 0;
                var->red.length = 8;
                var->green.length = 8;
                var->blue.length = 8;
                var->transp.length = 8;
                var->transp.offset = 24;
                break;
        default:
                DRM_ERROR("Bad depth %u.\n", depth);
                return -EINVAL;
        }

        if ((var->xoffset + var->xres) > par->max_width ||
            (var->yoffset + var->yres) > par->max_height) {
                DRM_ERROR("Requested geometry cannot fit in framebuffer\n");
                return -EINVAL;
        }

        if (!vmw_kms_validate_mode_vram(vmw_priv,
                                        var->xres * var->bits_per_pixel / 8,
                                        var->yoffset + var->yres)) {
                DRM_ERROR("Requested geometry cannot fit in framebuffer\n");
                return -EINVAL;
        }

        return 0;
}

static int vmw_fb_blank(int blank, struct fb_info *info)
{
        return 0;
}

/*
 * Dirty code
 */

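/*
 * vmw_fb_dirty_flush - Delayed work; copy the pending dirty region of
 * the vmalloc shadow buffer into the mapped buffer object, then notify
 * KMS via the framebuffer's dirty callback and flush the device FIFO.
 */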
static void vmw_fb_dirty_flush(struct work_struct *work)
{
        struct vmw_fb_par *par = container_of(work, struct vmw_fb_par,
                                              local_work.work);
        struct vmw_private *vmw_priv = par->vmw_priv;
        struct fb_info *info = vmw_priv->fb_info;
        unsigned long irq_flags;
        s32 dst_x1, dst_x2, dst_y1, dst_y2, w, h;
        u32 cpp, max_x, max_y;
        struct drm_clip_rect clip;
        struct drm_framebuffer *cur_fb;
        u8 *src_ptr, *dst_ptr;

        if (vmw_priv->suspended)
                return;

        mutex_lock(&par->bo_mutex);
        cur_fb = par->set_fb;
        if (!cur_fb)
                goto out_unlock;

        spin_lock_irqsave(&par->dirty.lock, irq_flags);
        if (!par->dirty.active) {
                spin_unlock_irqrestore(&par->dirty.lock, irq_flags);
                goto out_unlock;
        }

        /*
         * Handle panning when copying from vmalloc to framebuffer.
         * Clip dirty area to framebuffer.
         */
        cpp = cur_fb->format->cpp[0];
        max_x = par->fb_x + cur_fb->width;
        max_y = par->fb_y + cur_fb->height;

        dst_x1 = par->dirty.x1 - par->fb_x;
        dst_y1 = par->dirty.y1 - par->fb_y;
        dst_x1 = max_t(s32, dst_x1, 0);
        dst_y1 = max_t(s32, dst_y1, 0);

        dst_x2 = par->dirty.x2 - par->fb_x;
        dst_y2 = par->dirty.y2 - par->fb_y;
        dst_x2 = min_t(s32, dst_x2, max_x);
        dst_y2 = min_t(s32, dst_y2, max_y);
        w = dst_x2 - dst_x1;
        h = dst_y2 - dst_y1;
        w = max_t(s32, 0, w);
        h = max_t(s32, 0, h);

        par->dirty.x1 = par->dirty.x2 = 0;
        par->dirty.y1 = par->dirty.y2 = 0;
        spin_unlock_irqrestore(&par->dirty.lock, irq_flags);

        if (w && h) {
                dst_ptr = (u8 *)par->bo_ptr +
                        (dst_y1 * par->set_fb->pitches[0] + dst_x1 * cpp);
                src_ptr = (u8 *)par->vmalloc +
                        ((dst_y1 + par->fb_y) * info->fix.line_length +
                         (dst_x1 + par->fb_x) * cpp);

                while (h-- > 0) {
                        memcpy(dst_ptr, src_ptr, w * cpp);
                        dst_ptr += par->set_fb->pitches[0];
                        src_ptr += info->fix.line_length;
                }

                clip.x1 = dst_x1;
                clip.x2 = dst_x2;
                clip.y1 = dst_y1;
                clip.y2 = dst_y2;

                WARN_ON_ONCE(par->set_fb->funcs->dirty(cur_fb, NULL, 0, 0,
                                                       &clip, 1));
                vmw_fifo_flush(vmw_priv, false);
        }
out_unlock:
        mutex_unlock(&par->bo_mutex);
}

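/*
 * vmw_fb_dirty_mark - Grow the pending dirty rectangle to cover the given
 * area. If this is the first dirty area since the last flush, kick off
 * the delayed flush work.
 */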
static void vmw_fb_dirty_mark(struct vmw_fb_par *par,
                              unsigned x1, unsigned y1,
                              unsigned width, unsigned height)
{
        unsigned long flags;
        unsigned x2 = x1 + width;
        unsigned y2 = y1 + height;

        spin_lock_irqsave(&par->dirty.lock, flags);
        if (par->dirty.x1 == par->dirty.x2) {
                par->dirty.x1 = x1;
                par->dirty.y1 = y1;
                par->dirty.x2 = x2;
                par->dirty.y2 = y2;
                /*
                 * If we are active, start the dirty work. The work item
                 * is shared with the deferred I/O system.
                 */
                if (par->dirty.active)
                        schedule_delayed_work(&par->local_work,
                                              VMW_DIRTY_DELAY);
        } else {
                if (x1 < par->dirty.x1)
                        par->dirty.x1 = x1;
                if (y1 < par->dirty.y1)
                        par->dirty.y1 = y1;
                if (x2 > par->dirty.x2)
                        par->dirty.x2 = x2;
                if (y2 > par->dirty.y2)
                        par->dirty.y2 = y2;
        }
        spin_unlock_irqrestore(&par->dirty.lock, flags);
}

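/*
 * vmw_fb_pan_display - fb_ops::fb_pan_display; record the new panning
 * offset and mark the visible framebuffer dirty so the flush worker
 * copies from the right place in the shadow buffer.
 */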
static int vmw_fb_pan_display(struct fb_var_screeninfo *var,
                              struct fb_info *info)
{
        struct vmw_fb_par *par = info->par;

        if ((var->xoffset + var->xres) > var->xres_virtual ||
            (var->yoffset + var->yres) > var->yres_virtual) {
                DRM_ERROR("Requested panning cannot fit in framebuffer\n");
                return -EINVAL;
        }

        mutex_lock(&par->bo_mutex);
        par->fb_x = var->xoffset;
        par->fb_y = var->yoffset;
        if (par->set_fb)
                vmw_fb_dirty_mark(par, par->fb_x, par->fb_y, par->set_fb->width,
                                  par->set_fb->height);
        mutex_unlock(&par->bo_mutex);

        return 0;
}

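/*
 * vmw_deferred_io - fb_deferred_io callback; convert the list of touched
 * pages into a full-width dirty scanline range and schedule an immediate
 * flush.
 */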
static void vmw_deferred_io(struct fb_info *info,
                            struct list_head *pagelist)
{
        struct vmw_fb_par *par = info->par;
        unsigned long start, end, min, max;
        unsigned long flags;
        struct page *page;
        int y1, y2;

        min = ULONG_MAX;
        max = 0;
        list_for_each_entry(page, pagelist, lru) {
                start = page->index << PAGE_SHIFT;
                end = start + PAGE_SIZE - 1;
                min = min(min, start);
                max = max(max, end);
        }

        if (min < max) {
                y1 = min / info->fix.line_length;
                y2 = (max / info->fix.line_length) + 1;

                spin_lock_irqsave(&par->dirty.lock, flags);
                par->dirty.x1 = 0;
                par->dirty.y1 = y1;
                par->dirty.x2 = info->var.xres;
                par->dirty.y2 = y2;
                spin_unlock_irqrestore(&par->dirty.lock, flags);

                /*
                 * Since we've already waited on this work once, try to
                 * execute asap.
                 */
                cancel_delayed_work(&par->local_work);
                schedule_delayed_work(&par->local_work, 0);
        }
}

static struct fb_deferred_io vmw_defio = {
        .delay          = VMW_DIRTY_DELAY,
        .deferred_io    = vmw_deferred_io,
};

/*
 * Draw code
 */

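/*
 * The fb_ops drawing hooks render into the vmalloc shadow buffer with the
 * generic cfb_* helpers and then mark the touched area dirty so that it
 * gets flushed to the device.
 */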
static void vmw_fb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
        cfb_fillrect(info, rect);
        vmw_fb_dirty_mark(info->par, rect->dx, rect->dy,
                          rect->width, rect->height);
}

static void vmw_fb_copyarea(struct fb_info *info, const struct fb_copyarea *region)
{
        cfb_copyarea(info, region);
        vmw_fb_dirty_mark(info->par, region->dx, region->dy,
                          region->width, region->height);
}

static void vmw_fb_imageblit(struct fb_info *info, const struct fb_image *image)
{
        cfb_imageblit(info, image);
        vmw_fb_dirty_mark(info->par, image->dx, image->dy,
                          image->width, image->height);
}

/*
 * Bring up code
 */

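/*
 * vmw_fb_create_bo - Allocate and initialize a buffer object of the given
 * size, placed in system memory, for use as the fbdev framebuffer backing
 * store.
 */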
static int vmw_fb_create_bo(struct vmw_private *vmw_priv,
                            size_t size, struct vmw_dma_buffer **out)
{
        struct vmw_dma_buffer *vmw_bo;
        int ret;

        (void) ttm_write_lock(&vmw_priv->reservation_sem, false);

        vmw_bo = kmalloc(sizeof(*vmw_bo), GFP_KERNEL);
        if (!vmw_bo) {
                ret = -ENOMEM;
                goto err_unlock;
        }

        ret = vmw_dmabuf_init(vmw_priv, vmw_bo, size,
                              &vmw_sys_placement,
                              false,
                              &vmw_dmabuf_bo_free);
        if (unlikely(ret != 0))
                goto err_unlock; /* init frees the buffer on failure */

        *out = vmw_bo;
        ttm_write_unlock(&vmw_priv->reservation_sem);

        return 0;

err_unlock:
        ttm_write_unlock(&vmw_priv->reservation_sem);
        return ret;
}

static int vmw_fb_compute_depth(struct fb_var_screeninfo *var,
                                int *depth)
{
        switch (var->bits_per_pixel) {
        case 32:
                *depth = (var->transp.length > 0) ? 32 : 24;
                break;
        default:
                DRM_ERROR("Bad bpp %u.\n", var->bits_per_pixel);
                return -EINVAL;
        }

        return 0;
}

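/*
 * vmwgfx_set_config_internal - Call the crtc's set_config hook while
 * keeping framebuffer refcounts correct across all crtcs, retrying the
 * modeset locks and restarting on -EDEADLK.
 */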
static int vmwgfx_set_config_internal(struct drm_mode_set *set)
{
        struct drm_crtc *crtc = set->crtc;
        struct drm_framebuffer *fb;
        struct drm_crtc *tmp;
        struct drm_modeset_acquire_ctx *ctx;
        struct drm_device *dev = set->crtc->dev;
        int ret;

        ctx = dev->mode_config.acquire_ctx;

restart:
        /*
         * NOTE: ->set_config can also disable other crtcs (if we steal all
         * connectors from it), hence we need to refcount the fbs across all
         * crtcs. Atomic modeset will have saner semantics ...
         */
        drm_for_each_crtc(tmp, dev)
                tmp->primary->old_fb = tmp->primary->fb;

        fb = set->fb;

        ret = crtc->funcs->set_config(set, ctx);
        if (ret == 0) {
                crtc->primary->crtc = crtc;
                crtc->primary->fb = fb;
        }

        drm_for_each_crtc(tmp, dev) {
                if (tmp->primary->fb)
                        drm_framebuffer_get(tmp->primary->fb);
                if (tmp->primary->old_fb)
                        drm_framebuffer_put(tmp->primary->old_fb);
                tmp->primary->old_fb = NULL;
        }

        if (ret == -EDEADLK) {
                dev->mode_config.acquire_ctx = NULL;

retry_locking:
                drm_modeset_backoff(ctx);

                ret = drm_modeset_lock_all_ctx(dev, ctx);
                if (ret)
                        goto retry_locking;

                dev->mode_config.acquire_ctx = ctx;

                goto restart;
        }

        return ret;
}

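/*
 * vmw_fb_kms_detach - Unset the current mode and drop the fbdev KMS
 * framebuffer. If @detach_bo is set, also unmap the backing buffer object
 * and either unreference it (@unref_bo) or just unpin it.
 */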
static int vmw_fb_kms_detach(struct vmw_fb_par *par,
                             bool detach_bo,
                             bool unref_bo)
{
        struct drm_framebuffer *cur_fb = par->set_fb;
        int ret;

        /* Detach the KMS framebuffer from crtcs */
        if (par->set_mode) {
                struct drm_mode_set set;

                set.crtc = par->crtc;
                set.x = 0;
                set.y = 0;
                set.mode = NULL;
                set.fb = NULL;
                set.num_connectors = 0;
                set.connectors = &par->con;
                ret = vmwgfx_set_config_internal(&set);
                if (ret) {
                        DRM_ERROR("Could not unset a mode.\n");
                        return ret;
                }
                drm_mode_destroy(par->vmw_priv->dev, par->set_mode);
                par->set_mode = NULL;
        }

        if (cur_fb) {
                drm_framebuffer_put(cur_fb);
                par->set_fb = NULL;
        }

        if (par->vmw_bo && detach_bo) {
                struct vmw_private *vmw_priv = par->vmw_priv;

                if (par->bo_ptr) {
                        ttm_bo_kunmap(&par->map);
                        par->bo_ptr = NULL;
                }
                if (unref_bo)
                        vmw_dmabuf_unreference(&par->vmw_bo);
                else if (vmw_priv->active_display_unit != vmw_du_legacy)
                        vmw_dmabuf_unpin(par->vmw_priv, par->vmw_bo, false);
        }

        return 0;
}

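/*
 * vmw_fb_kms_framebuffer - Make par->set_fb match the current fbdev var.
 * Reuses the existing framebuffer when size and format still match;
 * otherwise detaches the old one, (re)allocates the backing buffer object
 * if needed and wraps it in a new KMS framebuffer.
 */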
static int vmw_fb_kms_framebuffer(struct fb_info *info)
{
        struct drm_mode_fb_cmd2 mode_cmd;
        struct vmw_fb_par *par = info->par;
        struct fb_var_screeninfo *var = &info->var;
        struct drm_framebuffer *cur_fb;
        struct vmw_framebuffer *vfb;
        int ret = 0, depth;
        size_t new_bo_size;

        ret = vmw_fb_compute_depth(var, &depth);
        if (ret)
                return ret;

        mode_cmd.width = var->xres;
        mode_cmd.height = var->yres;
        mode_cmd.pitches[0] = ((var->bits_per_pixel + 7) / 8) * mode_cmd.width;
        mode_cmd.pixel_format =
                drm_mode_legacy_fb_format(var->bits_per_pixel, depth);

        cur_fb = par->set_fb;
        if (cur_fb && cur_fb->width == mode_cmd.width &&
            cur_fb->height == mode_cmd.height &&
            cur_fb->format->format == mode_cmd.pixel_format &&
            cur_fb->pitches[0] == mode_cmd.pitches[0])
                return 0;

        /* Do we need a new buffer object? */
        new_bo_size = (size_t) mode_cmd.pitches[0] * (size_t) mode_cmd.height;
        ret = vmw_fb_kms_detach(par,
                                par->bo_size < new_bo_size ||
                                par->bo_size > 2 * new_bo_size,
                                true);
        if (ret)
                return ret;

        if (!par->vmw_bo) {
                ret = vmw_fb_create_bo(par->vmw_priv, new_bo_size,
                                       &par->vmw_bo);
                if (ret) {
                        DRM_ERROR("Failed creating a buffer object for "
                                  "fbdev.\n");
                        return ret;
                }
                par->bo_size = new_bo_size;
        }

        vfb = vmw_kms_new_framebuffer(par->vmw_priv, par->vmw_bo, NULL,
                                      true, &mode_cmd);
        if (IS_ERR(vfb))
                return PTR_ERR(vfb);

        par->set_fb = &vfb->base;

        return 0;
}

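/*
 * vmw_fb_set_par - fb_ops::fb_set_par; build a display mode from the
 * current var, (re)create the KMS framebuffer, set the configuration on
 * the crtc and map the backing buffer object for CPU blits.
 */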
static int vmw_fb_set_par(struct fb_info *info)
{
        struct vmw_fb_par *par = info->par;
        struct vmw_private *vmw_priv = par->vmw_priv;
        struct drm_mode_set set;
        struct fb_var_screeninfo *var = &info->var;
        struct drm_display_mode new_mode = { DRM_MODE("fb_mode",
                DRM_MODE_TYPE_DRIVER,
                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
        };
        struct drm_display_mode *old_mode;
        struct drm_display_mode *mode;
        int ret;

        old_mode = par->set_mode;
        mode = drm_mode_duplicate(vmw_priv->dev, &new_mode);
        if (!mode) {
                DRM_ERROR("Could not create new fb mode.\n");
                return -ENOMEM;
        }

        mode->hdisplay = var->xres;
        mode->vdisplay = var->yres;
        vmw_guess_mode_timing(mode);

        if (old_mode && drm_mode_equal(old_mode, mode)) {
                drm_mode_destroy(vmw_priv->dev, mode);
                mode = old_mode;
                old_mode = NULL;
        } else if (!vmw_kms_validate_mode_vram(vmw_priv,
                                        mode->hdisplay *
                                        DIV_ROUND_UP(var->bits_per_pixel, 8),
                                        mode->vdisplay)) {
                drm_mode_destroy(vmw_priv->dev, mode);
                return -EINVAL;
        }

        mutex_lock(&par->bo_mutex);
        drm_modeset_lock_all(vmw_priv->dev);
        ret = vmw_fb_kms_framebuffer(info);
        if (ret)
                goto out_unlock;

        par->fb_x = var->xoffset;
        par->fb_y = var->yoffset;

        set.crtc = par->crtc;
        set.x = 0;
        set.y = 0;
        set.mode = mode;
        set.fb = par->set_fb;
        set.num_connectors = 1;
        set.connectors = &par->con;

        ret = vmwgfx_set_config_internal(&set);
        if (ret)
                goto out_unlock;

        if (!par->bo_ptr) {
                struct vmw_framebuffer *vfb = vmw_framebuffer_to_vfb(set.fb);

                /*
                 * Pin before mapping. Since we don't know in what placement
                 * to pin, call into KMS to do it for us. LDU doesn't require
                 * additional pinning because set_config() would've pinned
                 * it already.
                 */
                if (vmw_priv->active_display_unit != vmw_du_legacy) {
                        ret = vfb->pin(vfb);
                        if (ret) {
                                DRM_ERROR("Could not pin the fbdev "
                                          "framebuffer.\n");
                                goto out_unlock;
                        }
                }

                ret = ttm_bo_kmap(&par->vmw_bo->base, 0,
                                  par->vmw_bo->base.num_pages, &par->map);
                if (ret) {
                        if (vmw_priv->active_display_unit != vmw_du_legacy)
                                vfb->unpin(vfb);

                        DRM_ERROR("Could not map the fbdev framebuffer.\n");
                        goto out_unlock;
                }

                par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &par->bo_iowrite);
        }

        vmw_fb_dirty_mark(par, par->fb_x, par->fb_y,
                          par->set_fb->width, par->set_fb->height);

        /*
         * If something was dirty already, vmw_fb_dirty_mark() won't have
         * scheduled new work, so schedule an immediate flush here.
         */
        schedule_delayed_work(&par->local_work, 0);

out_unlock:
        if (old_mode)
                drm_mode_destroy(vmw_priv->dev, old_mode);
        par->set_mode = mode;

        drm_modeset_unlock_all(vmw_priv->dev);
        mutex_unlock(&par->bo_mutex);

        return ret;
}

static struct fb_ops vmw_fb_ops = {
        .owner = THIS_MODULE,
        .fb_check_var = vmw_fb_check_var,
        .fb_set_par = vmw_fb_set_par,
        .fb_setcolreg = vmw_fb_setcolreg,
        .fb_fillrect = vmw_fb_fillrect,
        .fb_copyarea = vmw_fb_copyarea,
        .fb_imageblit = vmw_fb_imageblit,
        .fb_pan_display = vmw_fb_pan_display,
        .fb_blank = vmw_fb_blank,
};

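/*
 * vmw_fb_init - Set up the fbdev emulation: allocate the fb_info and the
 * vmalloc shadow buffer, query an initial mode, fill in the fixed and
 * variable screen info, hook up deferred I/O and register the
 * framebuffer.
 */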
int vmw_fb_init(struct vmw_private *vmw_priv)
{
        struct device *device = &vmw_priv->dev->pdev->dev;
        struct vmw_fb_par *par;
        struct fb_info *info;
        unsigned fb_width, fb_height;
        unsigned fb_bpp, fb_depth, fb_offset, fb_pitch, fb_size;
        struct drm_display_mode *init_mode;
        int ret;

        fb_bpp = 32;
        fb_depth = 24;

        /* XXX Shouldn't these be read from the device as well? */
        fb_width = min(vmw_priv->fb_max_width, (unsigned)2048);
        fb_height = min(vmw_priv->fb_max_height, (unsigned)2048);

        fb_pitch = fb_width * fb_bpp / 8;
        fb_size = fb_pitch * fb_height;
        fb_offset = vmw_read(vmw_priv, SVGA_REG_FB_OFFSET);

        info = framebuffer_alloc(sizeof(*par), device);
        if (!info)
                return -ENOMEM;

        /*
         * Par
         */
        vmw_priv->fb_info = info;
        par = info->par;
        memset(par, 0, sizeof(*par));
        INIT_DELAYED_WORK(&par->local_work, &vmw_fb_dirty_flush);
        par->vmw_priv = vmw_priv;
        par->vmalloc = NULL;
        par->max_width = fb_width;
        par->max_height = fb_height;

        drm_modeset_lock_all(vmw_priv->dev);
        ret = vmw_kms_fbdev_init_data(vmw_priv, 0, par->max_width,
                                      par->max_height, &par->con,
                                      &par->crtc, &init_mode);
        if (ret) {
                drm_modeset_unlock_all(vmw_priv->dev);
                goto err_kms;
        }

        info->var.xres = init_mode->hdisplay;
        info->var.yres = init_mode->vdisplay;
        drm_modeset_unlock_all(vmw_priv->dev);

        /*
         * Create buffers and alloc memory
         */
        par->vmalloc = vzalloc(fb_size);
        if (unlikely(par->vmalloc == NULL)) {
                ret = -ENOMEM;
                goto err_free;
        }

        /*
         * Fixed and var
         */
        strcpy(info->fix.id, "svgadrmfb");
        info->fix.type = FB_TYPE_PACKED_PIXELS;
        info->fix.visual = FB_VISUAL_TRUECOLOR;
        info->fix.type_aux = 0;
        info->fix.xpanstep = 1; /* doing it in hw */
        info->fix.ypanstep = 1; /* doing it in hw */
        info->fix.ywrapstep = 0;
        info->fix.accel = FB_ACCEL_NONE;
        info->fix.line_length = fb_pitch;

        info->fix.smem_start = 0;
        info->fix.smem_len = fb_size;

        info->pseudo_palette = par->pseudo_palette;
        info->screen_base = (char __iomem *)par->vmalloc;
        info->screen_size = fb_size;

        info->fbops = &vmw_fb_ops;

        /* 24-bit depth by default */
        info->var.red.offset = 16;
        info->var.green.offset = 8;
        info->var.blue.offset = 0;
        info->var.red.length = 8;
        info->var.green.length = 8;
        info->var.blue.length = 8;
        info->var.transp.offset = 0;
        info->var.transp.length = 0;

        info->var.xres_virtual = fb_width;
        info->var.yres_virtual = fb_height;
        info->var.bits_per_pixel = fb_bpp;
        info->var.xoffset = 0;
        info->var.yoffset = 0;
        info->var.activate = FB_ACTIVATE_NOW;
        info->var.height = -1;
        info->var.width = -1;

        /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
        info->apertures = alloc_apertures(1);
        if (!info->apertures) {
                ret = -ENOMEM;
                goto err_aper;
        }
        info->apertures->ranges[0].base = vmw_priv->vram_start;
        info->apertures->ranges[0].size = vmw_priv->vram_size;

        /*
         * Dirty & Deferred IO
         */
        par->dirty.x1 = par->dirty.x2 = 0;
        par->dirty.y1 = par->dirty.y2 = 0;
        par->dirty.active = true;
        spin_lock_init(&par->dirty.lock);
        mutex_init(&par->bo_mutex);
        info->fbdefio = &vmw_defio;
        fb_deferred_io_init(info);

        ret = register_framebuffer(info);
        if (unlikely(ret != 0))
                goto err_defio;

        vmw_fb_set_par(info);

        return 0;

err_defio:
        fb_deferred_io_cleanup(info);
err_aper:
err_free:
        vfree(par->vmalloc);
err_kms:
        framebuffer_release(info);
        vmw_priv->fb_info = NULL;

        return ret;
}

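/*
 * vmw_fb_close - Unregister the framebuffer and free all fbdev emulation
 * resources, including the backing buffer object and the shadow buffer.
 */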
int vmw_fb_close(struct vmw_private *vmw_priv)
{
        struct fb_info *info;
        struct vmw_fb_par *par;

        if (!vmw_priv->fb_info)
                return 0;

        info = vmw_priv->fb_info;
        par = info->par;

        /* Stop deferred I/O and the flush worker before unregistering. */
        fb_deferred_io_cleanup(info);
        cancel_delayed_work_sync(&par->local_work);
        unregister_framebuffer(info);

        (void) vmw_fb_kms_detach(par, true, true);

        vfree(par->vmalloc);
        framebuffer_release(info);

        return 0;
}

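/*
 * vmw_fb_off - Disable the fbdev console: stop dirty flushing, flush any
 * pending work and detach the framebuffer from KMS, keeping the buffer
 * object around for vmw_fb_on().
 */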
int vmw_fb_off(struct vmw_private *vmw_priv)
{
        struct fb_info *info;
        struct vmw_fb_par *par;
        unsigned long flags;

        if (!vmw_priv->fb_info)
                return -EINVAL;

        info = vmw_priv->fb_info;
        par = info->par;

        spin_lock_irqsave(&par->dirty.lock, flags);
        par->dirty.active = false;
        spin_unlock_irqrestore(&par->dirty.lock, flags);

        flush_delayed_work(&info->deferred_work);
        flush_delayed_work(&par->local_work);

        mutex_lock(&par->bo_mutex);
        drm_modeset_lock_all(vmw_priv->dev);
        (void) vmw_fb_kms_detach(par, true, false);
        drm_modeset_unlock_all(vmw_priv->dev);
        mutex_unlock(&par->bo_mutex);

        return 0;
}

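/*
 * vmw_fb_on - Re-enable the fbdev console after vmw_fb_off(): restore the
 * mode and framebuffer and resume dirty flushing.
 */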
int vmw_fb_on(struct vmw_private *vmw_priv)
{
        struct fb_info *info;
        struct vmw_fb_par *par;
        unsigned long flags;

        if (!vmw_priv->fb_info)
                return -EINVAL;

        info = vmw_priv->fb_info;
        par = info->par;

        vmw_fb_set_par(info);
        spin_lock_irqsave(&par->dirty.lock, flags);
        par->dirty.active = true;
        spin_unlock_irqrestore(&par->dirty.lock, flags);

        return 0;
}