// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include "vmwgfx_kms.h"

#include "vmwgfx_bo.h"
#include "vmw_surface_cache.h"

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_rect.h>
#include <drm/drm_sysfs.h>

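/**
 * vmw_du_cleanup - Tear down the DRM objects embedded in a display unit
 * @du: The display unit to clean up.
 *
 * Cleans up the primary plane and, when the device supports command
 * submission, the cursor plane, then unregisters the connector and
 * cleans up the connector, crtc and encoder.
 */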
void vmw_du_cleanup(struct vmw_display_unit *du)
{
	struct vmw_private *dev_priv = vmw_priv(du->primary.dev);

	drm_plane_cleanup(&du->primary);
	if (vmw_cmd_supported(dev_priv))
		drm_plane_cleanup(&du->cursor.base);

	drm_connector_unregister(&du->connector);
	drm_crtc_cleanup(&du->crtc);
	drm_encoder_cleanup(&du->encoder);
	drm_connector_cleanup(&du->connector);
}

/*
 * Display Unit Cursor functions
 */

static int vmw_du_cursor_plane_unmap_cm(struct vmw_plane_state *vps);
static void vmw_cursor_update_mob(struct vmw_private *dev_priv,
				  struct vmw_plane_state *vps,
				  u32 *image, u32 width, u32 height,
				  u32 hotspotX, u32 hotspotY);

struct vmw_svga_fifo_cmd_define_cursor {
	u32 cmd;
	SVGAFifoCmdDefineAlphaCursor cursor;
};

/**
 * vmw_send_define_cursor_cmd - queue a define cursor command
 * @dev_priv: the private driver struct
 * @image: buffer which holds the cursor image
 * @width: width of the mouse cursor image
 * @height: height of the mouse cursor image
 * @hotspotX: the horizontal position of mouse hotspot
 * @hotspotY: the vertical position of mouse hotspot
 */
static void vmw_send_define_cursor_cmd(struct vmw_private *dev_priv,
				       u32 *image, u32 width, u32 height,
				       u32 hotspotX, u32 hotspotY)
{
	struct vmw_svga_fifo_cmd_define_cursor *cmd;
	const u32 image_size = width * height * sizeof(*image);
	const u32 cmd_size = sizeof(*cmd) + image_size;

	/*
	 * Try to reserve fifocmd space and swallow any failures;
	 * such reservations cannot be left unconsumed for long
	 * under the risk of clogging other fifocmd users, so we
	 * treat reservations separately from the way we treat other
	 * fallible KMS-atomic resources at prepare_fb.
	 */
	cmd = VMW_CMD_RESERVE(dev_priv, cmd_size);

	if (unlikely(!cmd))
		return;

	memset(cmd, 0, sizeof(*cmd));

	memcpy(&cmd[1], image, image_size);

	cmd->cmd = SVGA_CMD_DEFINE_ALPHA_CURSOR;
	cmd->cursor.id = 0;
	cmd->cursor.width = width;
	cmd->cursor.height = height;
	cmd->cursor.hotspotX = hotspotX;
	cmd->cursor.hotspotY = hotspotY;

	vmw_cmd_commit_flush(dev_priv, cmd_size);
}

/**
 * vmw_cursor_update_image - update the cursor image on the provided plane
 * @dev_priv: the private driver struct
 * @vps: the plane state of the cursor plane
 * @image: buffer which holds the cursor image
 * @width: width of the mouse cursor image
 * @height: height of the mouse cursor image
 * @hotspotX: the horizontal position of mouse hotspot
 * @hotspotY: the vertical position of mouse hotspot
 */
static void vmw_cursor_update_image(struct vmw_private *dev_priv,
				    struct vmw_plane_state *vps,
				    u32 *image, u32 width, u32 height,
				    u32 hotspotX, u32 hotspotY)
{
	if (vps->cursor.bo)
		vmw_cursor_update_mob(dev_priv, vps, image,
				      vps->base.crtc_w, vps->base.crtc_h,
				      hotspotX, hotspotY);
	else
		vmw_send_define_cursor_cmd(dev_priv, image, width, height,
					   hotspotX, hotspotY);
}


/**
 * vmw_cursor_update_mob - Update cursor via the CursorMob mechanism
 *
 * Called from inside vmw_du_cursor_plane_atomic_update to actually
 * make the cursor-image live.
 *
 * @dev_priv: device to work with
 * @vps: the plane state of the cursor plane
 * @image: cursor source data to fill the MOB with
 * @width: source data width
 * @height: source data height
 * @hotspotX: cursor hotspot x
 * @hotspotY: cursor hotspot y
 */
static void vmw_cursor_update_mob(struct vmw_private *dev_priv,
				  struct vmw_plane_state *vps,
				  u32 *image, u32 width, u32 height,
				  u32 hotspotX, u32 hotspotY)
{
	SVGAGBCursorHeader *header;
	SVGAGBAlphaCursorHeader *alpha_header;
	const u32 image_size = width * height * sizeof(*image);

	header = vmw_bo_map_and_cache(vps->cursor.bo);
	alpha_header = &header->header.alphaHeader;

	memset(header, 0, sizeof(*header));

	header->type = SVGA_ALPHA_CURSOR;
	header->sizeInBytes = image_size;

	alpha_header->hotspotX = hotspotX;
	alpha_header->hotspotY = hotspotY;
	alpha_header->width = width;
	alpha_header->height = height;

	memcpy(header + 1, image, image_size);
	vmw_write(dev_priv, SVGA_REG_CURSOR_MOBID,
		  vps->cursor.bo->tbo.resource->start);
}


static u32 vmw_du_cursor_mob_size(u32 w, u32 h)
{
	return w * h * sizeof(u32) + sizeof(SVGAGBCursorHeader);
}

/**
 * vmw_du_cursor_plane_acquire_image - Acquire the image data
 * @vps: cursor plane state
 */
static u32 *vmw_du_cursor_plane_acquire_image(struct vmw_plane_state *vps)
{
	bool is_iomem;

	if (vps->surf) {
		if (vps->surf_mapped)
			return vmw_bo_map_and_cache(vps->surf->res.guest_memory_bo);
		return vps->surf->snooper.image;
	} else if (vps->bo)
		return ttm_kmap_obj_virtual(&vps->bo->map, &is_iomem);
	return NULL;
}

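/*
 * Compare two cursor plane states. The cursor needs to be re-defined on
 * the device when the dimensions, the hotspot or the image contents of
 * the new state differ from the old one.
 */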
static bool vmw_du_cursor_plane_has_changed(struct vmw_plane_state *old_vps,
					    struct vmw_plane_state *new_vps)
{
	void *old_image;
	void *new_image;
	u32 size;
	bool changed;

	if (old_vps->base.crtc_w != new_vps->base.crtc_w ||
	    old_vps->base.crtc_h != new_vps->base.crtc_h)
		return true;

	if (old_vps->cursor.hotspot_x != new_vps->cursor.hotspot_x ||
	    old_vps->cursor.hotspot_y != new_vps->cursor.hotspot_y)
		return true;

	size = new_vps->base.crtc_w * new_vps->base.crtc_h * sizeof(u32);

	old_image = vmw_du_cursor_plane_acquire_image(old_vps);
	new_image = vmw_du_cursor_plane_acquire_image(new_vps);

	changed = false;
	if (old_image && new_image)
		changed = memcmp(old_image, new_image, size) != 0;

	return changed;
}

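/* Unpin and unreference a cursor mob, clearing the caller's pointer. */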
static void vmw_du_destroy_cursor_mob(struct vmw_bo **vbo)
{
	if (!(*vbo))
		return;

	ttm_bo_unpin(&(*vbo)->tbo);
	vmw_bo_unreference(vbo);
}

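/*
 * Return a cursor mob to the plane's small per-plane cache. The mob is
 * first unmapped, then stashed in a free cache slot; when the cache is
 * full it may evict a smaller cached mob, and a mob not worth caching
 * is destroyed.
 */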
static void vmw_du_put_cursor_mob(struct vmw_cursor_plane *vcp,
				  struct vmw_plane_state *vps)
{
	u32 i;

	if (!vps->cursor.bo)
		return;

	vmw_du_cursor_plane_unmap_cm(vps);

	/* Look for a free slot to return this mob to the cache. */
	for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
		if (!vcp->cursor_mobs[i]) {
			vcp->cursor_mobs[i] = vps->cursor.bo;
			vps->cursor.bo = NULL;
			return;
		}
	}

	/* Cache is full: See if this mob is bigger than an existing mob. */
	for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
		if (vcp->cursor_mobs[i]->tbo.base.size <
		    vps->cursor.bo->tbo.base.size) {
			vmw_du_destroy_cursor_mob(&vcp->cursor_mobs[i]);
			vcp->cursor_mobs[i] = vps->cursor.bo;
			vps->cursor.bo = NULL;
			return;
		}
	}

	/* Destroy it if it's not worth caching. */
	vmw_du_destroy_cursor_mob(&vps->cursor.bo);
}

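/*
 * Find backing storage for the cursor image: reuse the mob already
 * attached to the plane state or a large-enough cached one, else create
 * (and fence) a fresh mob. Fails when the device lacks cursor mob
 * support or the request exceeds the device's mob and cursor size
 * limits.
 */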
static int vmw_du_get_cursor_mob(struct vmw_cursor_plane *vcp,
				 struct vmw_plane_state *vps)
{
	struct vmw_private *dev_priv = vcp->base.dev->dev_private;
	u32 size = vmw_du_cursor_mob_size(vps->base.crtc_w, vps->base.crtc_h);
	u32 i;
	u32 cursor_max_dim, mob_max_size;
	int ret;

	if (!dev_priv->has_mob ||
	    (dev_priv->capabilities2 & SVGA_CAP2_CURSOR_MOB) == 0)
		return -EINVAL;

	mob_max_size = vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
	cursor_max_dim = vmw_read(dev_priv, SVGA_REG_CURSOR_MAX_DIMENSION);

	if (size > mob_max_size || vps->base.crtc_w > cursor_max_dim ||
	    vps->base.crtc_h > cursor_max_dim)
		return -EINVAL;

	if (vps->cursor.bo) {
		if (vps->cursor.bo->tbo.base.size >= size)
			return 0;
		vmw_du_put_cursor_mob(vcp, vps);
	}

	/* Look for an unused mob in the cache. */
	for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
		if (vcp->cursor_mobs[i] &&
		    vcp->cursor_mobs[i]->tbo.base.size >= size) {
			vps->cursor.bo = vcp->cursor_mobs[i];
			vcp->cursor_mobs[i] = NULL;
			return 0;
		}
	}

	/* Create a new mob if we can't find an existing one. */
	ret = vmw_bo_create_and_populate(dev_priv, size,
					 VMW_BO_DOMAIN_MOB,
					 &vps->cursor.bo);

	if (ret != 0)
		return ret;

	/* Fence the mob creation so we are guaranteed to have the mob */
	ret = ttm_bo_reserve(&vps->cursor.bo->tbo, false, false, NULL);
	if (ret != 0)
		goto teardown;

	vmw_bo_fence_single(&vps->cursor.bo->tbo, NULL);
	ttm_bo_unreserve(&vps->cursor.bo->tbo);
	return 0;

teardown:
	vmw_du_destroy_cursor_mob(&vps->cursor.bo);
	return ret;
}


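/*
 * Program cursor position and visibility, using the CURSOR4 register
 * set when SVGA_CAP2_EXTRA_REGS is present, the cursor bypass 3 FIFO
 * registers when enabled, or the legacy cursor registers otherwise.
 */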
static void vmw_cursor_update_position(struct vmw_private *dev_priv,
				       bool show, int x, int y)
{
	const uint32_t svga_cursor_on = show ? SVGA_CURSOR_ON_SHOW
					     : SVGA_CURSOR_ON_HIDE;
	uint32_t count;

	spin_lock(&dev_priv->cursor_lock);
	if (dev_priv->capabilities2 & SVGA_CAP2_EXTRA_REGS) {
		vmw_write(dev_priv, SVGA_REG_CURSOR4_X, x);
		vmw_write(dev_priv, SVGA_REG_CURSOR4_Y, y);
		vmw_write(dev_priv, SVGA_REG_CURSOR4_SCREEN_ID, SVGA3D_INVALID_ID);
		vmw_write(dev_priv, SVGA_REG_CURSOR4_ON, svga_cursor_on);
		vmw_write(dev_priv, SVGA_REG_CURSOR4_SUBMIT, 1);
	} else if (vmw_is_cursor_bypass3_enabled(dev_priv)) {
		vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_ON, svga_cursor_on);
		vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_X, x);
		vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_Y, y);
		count = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_CURSOR_COUNT);
		vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_COUNT, ++count);
	} else {
		vmw_write(dev_priv, SVGA_REG_CURSOR_X, x);
		vmw_write(dev_priv, SVGA_REG_CURSOR_Y, y);
		vmw_write(dev_priv, SVGA_REG_CURSOR_ON, svga_cursor_on);
	}
	spin_unlock(&dev_priv->cursor_lock);
}

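/**
 * vmw_kms_cursor_snoop - Snoop a surface DMA command for cursor contents
 * @srf: The surface used as a cursor.
 * @tfile: The ttm object file of the caller.
 * @bo: The buffer object that is the source of the DMA.
 * @header: The header of the SurfaceDMA command to snoop.
 *
 * Copies the cursor image carried by a simple, page-aligned, single-box
 * surface DMA into @srf->snooper.image and bumps the snooper age, so
 * that the host cursor can later be re-defined from the snooped data.
 * More complex DMA layouts are rejected with an error message.
 */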
void vmw_kms_cursor_snoop(struct vmw_surface *srf,
			  struct ttm_object_file *tfile,
			  struct ttm_buffer_object *bo,
			  SVGA3dCmdHeader *header)
{
	struct ttm_bo_kmap_obj map;
	unsigned long kmap_offset;
	unsigned long kmap_num;
	SVGA3dCopyBox *box;
	unsigned int box_count;
	void *virtual;
	bool is_iomem;
	struct vmw_dma_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceDMA dma;
	} *cmd;
	int i, ret;
	const struct SVGA3dSurfaceDesc *desc =
		vmw_surface_get_desc(VMW_CURSOR_SNOOP_FORMAT);
	const u32 image_pitch = VMW_CURSOR_SNOOP_WIDTH * desc->pitchBytesPerBlock;

	cmd = container_of(header, struct vmw_dma_cmd, header);

	/* No snooper installed, nothing to copy */
	if (!srf->snooper.image)
		return;

	if (cmd->dma.host.face != 0 || cmd->dma.host.mipmap != 0) {
		DRM_ERROR("face and mipmap for cursors should never != 0\n");
		return;
	}

	if (cmd->header.size < 64) {
		DRM_ERROR("at least one full copy box must be given\n");
		return;
	}

	box = (SVGA3dCopyBox *)&cmd[1];
	box_count = (cmd->header.size - sizeof(SVGA3dCmdSurfaceDMA)) /
			sizeof(SVGA3dCopyBox);

	if (cmd->dma.guest.ptr.offset % PAGE_SIZE ||
	    box->x != 0    || box->y != 0    || box->z != 0    ||
	    box->srcx != 0 || box->srcy != 0 || box->srcz != 0 ||
	    box->d != 1    || box_count != 1 ||
	    box->w > VMW_CURSOR_SNOOP_WIDTH || box->h > VMW_CURSOR_SNOOP_HEIGHT) {
		/* TODO handle non-page-aligned offsets */
		/* TODO handle non-zero dst & src coordinates */
		/* TODO handle more than one copy box */
		DRM_ERROR("Can't snoop dma request for cursor!\n");
		DRM_ERROR("(%u, %u, %u) (%u, %u, %u) (%ux%ux%u) %u %u\n",
			  box->srcx, box->srcy, box->srcz,
			  box->x, box->y, box->z,
			  box->w, box->h, box->d, box_count,
			  cmd->dma.guest.ptr.offset);
		return;
	}

	kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT;
	kmap_num = (VMW_CURSOR_SNOOP_HEIGHT * image_pitch) >> PAGE_SHIFT;

	ret = ttm_bo_reserve(bo, true, false, NULL);
	if (unlikely(ret != 0)) {
		DRM_ERROR("reserve failed\n");
		return;
	}

	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
	if (unlikely(ret != 0))
		goto err_unreserve;

	virtual = ttm_kmap_obj_virtual(&map, &is_iomem);

	if (box->w == VMW_CURSOR_SNOOP_WIDTH && cmd->dma.guest.pitch == image_pitch) {
		memcpy(srf->snooper.image, virtual,
		       VMW_CURSOR_SNOOP_HEIGHT * image_pitch);
	} else {
		/* Image is unsigned pointer. */
		for (i = 0; i < box->h; i++)
			memcpy(srf->snooper.image + i * image_pitch,
			       virtual + i * cmd->dma.guest.pitch,
			       box->w * desc->pitchBytesPerBlock);
	}

	srf->snooper.age++;

	ttm_bo_kunmap(&map);
err_unreserve:
	ttm_bo_unreserve(bo);
}

/**
 * vmw_kms_legacy_hotspot_clear - Clear legacy hotspots
 *
 * @dev_priv: Pointer to the device private struct.
 *
 * Clears all legacy hotspots.
 */
void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct vmw_display_unit *du;
	struct drm_crtc *crtc;

	drm_modeset_lock_all(dev);
	drm_for_each_crtc(crtc, dev) {
		du = vmw_crtc_to_du(crtc);

		du->hotspot_x = 0;
		du->hotspot_y = 0;
	}
	drm_modeset_unlock_all(dev);
}

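/*
 * Re-define the host cursor after command submission: for each crtc
 * whose snooped cursor surface has aged past what is currently shown,
 * send a fresh define-cursor command built from the snooped image and
 * the accumulated hotspot.
 */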
void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct vmw_display_unit *du;
	struct drm_crtc *crtc;

	mutex_lock(&dev->mode_config.mutex);

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		du = vmw_crtc_to_du(crtc);
		if (!du->cursor_surface ||
		    du->cursor_age == du->cursor_surface->snooper.age ||
		    !du->cursor_surface->snooper.image)
			continue;

		du->cursor_age = du->cursor_surface->snooper.age;
		vmw_send_define_cursor_cmd(dev_priv,
					   du->cursor_surface->snooper.image,
					   VMW_CURSOR_SNOOP_WIDTH,
					   VMW_CURSOR_SNOOP_HEIGHT,
					   du->hotspot_x + du->core_hotspot_x,
					   du->hotspot_y + du->core_hotspot_y);
	}

	mutex_unlock(&dev->mode_config.mutex);
}


void vmw_du_cursor_plane_destroy(struct drm_plane *plane)
{
	struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
	u32 i;

	vmw_cursor_update_position(plane->dev->dev_private, false, 0, 0);

	for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++)
		vmw_du_destroy_cursor_mob(&vcp->cursor_mobs[i]);

	drm_plane_cleanup(plane);
}


void vmw_du_primary_plane_destroy(struct drm_plane *plane)
{
	drm_plane_cleanup(plane);

	/* Planes are static in our case so we don't free them */
}


/**
 * vmw_du_plane_unpin_surf - unpins resource associated with a framebuffer surface
 *
 * @vps: plane state associated with the display surface
 * @unreference: true if we also want to unreference the display surface.
 */
void vmw_du_plane_unpin_surf(struct vmw_plane_state *vps,
			     bool unreference)
{
	if (vps->surf) {
		if (vps->pinned) {
			vmw_resource_unpin(&vps->surf->res);
			vps->pinned--;
		}

		if (unreference) {
			if (vps->pinned)
				DRM_ERROR("Surface still pinned\n");
			vmw_surface_unreference(&vps->surf);
		}
	}
}


/**
 * vmw_du_plane_cleanup_fb - Unpins the plane surface
 *
 * @plane:  display plane
 * @old_state: Contains the FB to clean up
 *
 * Unpins the framebuffer surface.
 */
void
vmw_du_plane_cleanup_fb(struct drm_plane *plane,
			struct drm_plane_state *old_state)
{
	struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);

	vmw_du_plane_unpin_surf(vps, false);
}


/**
 * vmw_du_cursor_plane_map_cm - Maps the cursor mob.
 *
 * @vps: plane_state
 *
 * Returns 0 on success
 */
static int
vmw_du_cursor_plane_map_cm(struct vmw_plane_state *vps)
{
	int ret;
	u32 size = vmw_du_cursor_mob_size(vps->base.crtc_w, vps->base.crtc_h);
	struct ttm_buffer_object *bo;

	if (!vps->cursor.bo)
		return -EINVAL;

	bo = &vps->cursor.bo->tbo;

	if (bo->base.size < size)
		return -EINVAL;

	if (vps->cursor.bo->map.virtual)
		return 0;

	ret = ttm_bo_reserve(bo, false, false, NULL);
	if (unlikely(ret != 0))
		return -ENOMEM;

	vmw_bo_map_and_cache(vps->cursor.bo);

	ttm_bo_unreserve(bo);

	return 0;
}


/**
 * vmw_du_cursor_plane_unmap_cm - Unmaps the cursor mob.
 *
 * @vps: state of the cursor plane
 *
 * Returns 0 on success
 */
static int
vmw_du_cursor_plane_unmap_cm(struct vmw_plane_state *vps)
{
	int ret = 0;
	struct vmw_bo *vbo = vps->cursor.bo;

	if (!vbo || !vbo->map.virtual)
		return 0;

	ret = ttm_bo_reserve(&vbo->tbo, true, false, NULL);
	if (likely(ret == 0)) {
		vmw_bo_unmap(vbo);
		ttm_bo_unreserve(&vbo->tbo);
	}

	return ret;
}


/**
 * vmw_du_cursor_plane_cleanup_fb - Unpins the plane surface
 *
 * @plane: cursor plane
 * @old_state: contains the state to clean up
 *
 * Unmaps all cursor bo mappings and unpins the cursor surface.
 */
void
vmw_du_cursor_plane_cleanup_fb(struct drm_plane *plane,
			       struct drm_plane_state *old_state)
{
	struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
	struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);
	bool is_iomem;

	if (vps->surf_mapped) {
		vmw_bo_unmap(vps->surf->res.guest_memory_bo);
		vps->surf_mapped = false;
	}

	if (vps->bo && ttm_kmap_obj_virtual(&vps->bo->map, &is_iomem)) {
		const int ret = ttm_bo_reserve(&vps->bo->tbo, true, false, NULL);

		if (likely(ret == 0)) {
			ttm_bo_kunmap(&vps->bo->map);
			ttm_bo_unreserve(&vps->bo->tbo);
		}
	}

	vmw_du_cursor_plane_unmap_cm(vps);
	vmw_du_put_cursor_mob(vcp, vps);

	vmw_du_plane_unpin_surf(vps, false);

	if (vps->surf) {
		vmw_surface_unreference(&vps->surf);
		vps->surf = NULL;
	}

	if (vps->bo) {
		vmw_bo_unreference(&vps->bo);
		vps->bo = NULL;
	}
}


/**
 * vmw_du_cursor_plane_prepare_fb - Readies the cursor by referencing it
 *
 * @plane:  display plane
 * @new_state: info on the new plane state, including the FB
 *
 * Returns 0 on success
 */
int
vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane,
			       struct drm_plane_state *new_state)
{
	struct drm_framebuffer *fb = new_state->fb;
	struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
	struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
	int ret = 0;

	if (vps->surf) {
		vmw_surface_unreference(&vps->surf);
		vps->surf = NULL;
	}

	if (vps->bo) {
		vmw_bo_unreference(&vps->bo);
		vps->bo = NULL;
	}

	if (fb) {
		if (vmw_framebuffer_to_vfb(fb)->bo) {
			vps->bo = vmw_framebuffer_to_vfbd(fb)->buffer;
			vmw_bo_reference(vps->bo);
		} else {
			vps->surf = vmw_framebuffer_to_vfbs(fb)->surface;
			vmw_surface_reference(vps->surf);
		}
	}

	if (!vps->surf && vps->bo) {
		const u32 size = new_state->crtc_w * new_state->crtc_h * sizeof(u32);

		/*
		 * Not using vmw_bo_map_and_cache() helper here as we need to
		 * reserve the ttm_buffer_object first which
		 * vmw_bo_map_and_cache() omits.
		 */
		ret = ttm_bo_reserve(&vps->bo->tbo, true, false, NULL);

		if (unlikely(ret != 0))
			return -ENOMEM;

		ret = ttm_bo_kmap(&vps->bo->tbo, 0, PFN_UP(size), &vps->bo->map);

		ttm_bo_unreserve(&vps->bo->tbo);

		if (unlikely(ret != 0))
			return -ENOMEM;
	} else if (vps->surf && !vps->bo && vps->surf->res.guest_memory_bo) {
		WARN_ON(vps->surf->snooper.image);
		ret = ttm_bo_reserve(&vps->surf->res.guest_memory_bo->tbo, true, false,
				     NULL);
		if (unlikely(ret != 0))
			return -ENOMEM;
		vmw_bo_map_and_cache(vps->surf->res.guest_memory_bo);
		ttm_bo_unreserve(&vps->surf->res.guest_memory_bo->tbo);
		vps->surf_mapped = true;
	}

	if (vps->surf || vps->bo) {
		vmw_du_get_cursor_mob(vcp, vps);
		vmw_du_cursor_plane_map_cm(vps);
	}

	return 0;
}


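/*
 * Atomic update hook for the cursor plane. Keeps the previously defined
 * cursor when the image is unchanged, otherwise pushes the new image to
 * the device, then updates the cursor position, visibility and the
 * hotspot bookkeeping on the display unit.
 */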
void
vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
				  struct drm_atomic_state *state)
{
	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
									   plane);
	struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
									   plane);
	struct drm_crtc *crtc = new_state->crtc ?: old_state->crtc;
	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
	struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
	struct vmw_plane_state *old_vps = vmw_plane_state_to_vps(old_state);
	s32 hotspot_x, hotspot_y;

	hotspot_x = du->hotspot_x + new_state->hotspot_x;
	hotspot_y = du->hotspot_y + new_state->hotspot_y;

	du->cursor_surface = vps->surf;
	du->cursor_bo = vps->bo;

	if (!vps->surf && !vps->bo) {
		vmw_cursor_update_position(dev_priv, false, 0, 0);
		return;
	}

	vps->cursor.hotspot_x = hotspot_x;
	vps->cursor.hotspot_y = hotspot_y;

	if (vps->surf)
		du->cursor_age = du->cursor_surface->snooper.age;

	if (!vmw_du_cursor_plane_has_changed(old_vps, vps)) {
		/*
		 * If it hasn't changed, avoid making the device do extra
		 * work by keeping the old cursor active.
		 */
		struct vmw_cursor_plane_state tmp = old_vps->cursor;

		old_vps->cursor = vps->cursor;
		vps->cursor = tmp;
	} else {
		void *image = vmw_du_cursor_plane_acquire_image(vps);

		if (image)
			vmw_cursor_update_image(dev_priv, vps, image,
						new_state->crtc_w,
						new_state->crtc_h,
						hotspot_x, hotspot_y);
	}

	du->cursor_x = new_state->crtc_x + du->set_gui_x;
	du->cursor_y = new_state->crtc_y + du->set_gui_y;

	vmw_cursor_update_position(dev_priv, true,
				   du->cursor_x + hotspot_x,
				   du->cursor_y + hotspot_y);

	du->core_hotspot_x = hotspot_x - du->hotspot_x;
	du->core_hotspot_y = hotspot_y - du->hotspot_y;
}


/**
 * vmw_du_primary_plane_atomic_check - check if the new state is okay
 *
 * @plane: display plane
 * @state: info on the new plane state, including the FB
 *
 * Check if the new state is settable given the current state.  Other
 * than what the atomic helper checks, we care about crtc fitting
 * the FB and maintaining one active framebuffer.
 *
 * Returns 0 on success
 */
int vmw_du_primary_plane_atomic_check(struct drm_plane *plane,
				      struct drm_atomic_state *state)
{
	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
									   plane);
	struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
									   plane);
	struct drm_crtc_state *crtc_state = NULL;
	struct drm_framebuffer *new_fb = new_state->fb;
	struct drm_framebuffer *old_fb = old_state->fb;
	int ret;

	/*
	 * Ignore damage clips if the framebuffer attached to the plane's state
	 * has changed since the last plane update (page-flip). In this case, a
	 * full plane update should happen because uploads are done per-buffer.
	 */
	if (old_fb != new_fb)
		new_state->ignore_damage_clips = true;

	if (new_state->crtc)
		crtc_state = drm_atomic_get_new_crtc_state(state,
							   new_state->crtc);

	ret = drm_atomic_helper_check_plane_state(new_state, crtc_state,
						  DRM_PLANE_NO_SCALING,
						  DRM_PLANE_NO_SCALING,
						  false, true);

	if (!ret && new_fb) {
		struct drm_crtc *crtc = new_state->crtc;
		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);

		vmw_connector_state_to_vcs(du->connector.state);
	}

	return ret;
}


/**
 * vmw_du_cursor_plane_atomic_check - check if the new state is okay
 *
 * @plane: cursor plane
 * @state: info on the new plane state
 *
 * This is a chance to fail if the new cursor state does not fit
 * our requirements.
 *
 * Returns 0 on success
 */
int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane,
				     struct drm_atomic_state *state)
{
	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
									   plane);
	int ret = 0;
	struct drm_crtc_state *crtc_state = NULL;
	struct vmw_surface *surface = NULL;
	struct drm_framebuffer *fb = new_state->fb;

	if (new_state->crtc)
		crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
							   new_state->crtc);

	ret = drm_atomic_helper_check_plane_state(new_state, crtc_state,
						  DRM_PLANE_NO_SCALING,
						  DRM_PLANE_NO_SCALING,
						  true, true);
	if (ret)
		return ret;

	/* Turning off */
	if (!fb)
		return 0;

	/* A lot of the code assumes this */
	if (new_state->crtc_w != 64 || new_state->crtc_h != 64) {
		DRM_ERROR("Invalid cursor dimensions (%d, %d)\n",
			  new_state->crtc_w, new_state->crtc_h);
		return -EINVAL;
	}

	if (!vmw_framebuffer_to_vfb(fb)->bo) {
		surface = vmw_framebuffer_to_vfbs(fb)->surface;

		WARN_ON(!surface);

		if (!surface ||
		    (!surface->snooper.image && !surface->res.guest_memory_bo)) {
			DRM_ERROR("surface not suitable for cursor\n");
			return -EINVAL;
		}
	}

	return 0;
}


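/*
 * Atomic check for the crtc: an enabled crtc must have an active
 * primary plane and only the display unit's own connector may be
 * bound; since the virtual device has no dot clock, a missing one is
 * substituted with the logical mode clock.
 */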
int vmw_du_crtc_atomic_check(struct drm_crtc *crtc,
			     struct drm_atomic_state *state)
{
	struct drm_crtc_state *new_state = drm_atomic_get_new_crtc_state(state,
									 crtc);
	struct vmw_display_unit *du = vmw_crtc_to_du(new_state->crtc);
	int connector_mask = drm_connector_mask(&du->connector);
	bool has_primary = new_state->plane_mask &
			   drm_plane_mask(crtc->primary);

	/* We always want to have an active plane with an active CRTC */
	if (has_primary != new_state->enable)
		return -EINVAL;

	if (new_state->connector_mask != connector_mask &&
	    new_state->connector_mask != 0) {
		DRM_ERROR("Invalid connectors configuration\n");
		return -EINVAL;
	}

	/*
	 * Our virtual device does not have a dot clock, so use the logical
	 * clock value as the dot clock.
	 */
	if (new_state->mode.crtc_clock == 0)
		new_state->adjusted_mode.crtc_clock = new_state->mode.clock;

	return 0;
}


void vmw_du_crtc_atomic_begin(struct drm_crtc *crtc,
			      struct drm_atomic_state *state)
{
}


void vmw_du_crtc_atomic_flush(struct drm_crtc *crtc,
			      struct drm_atomic_state *state)
{
}


/**
 * vmw_du_crtc_duplicate_state - duplicate crtc state
 * @crtc: DRM crtc
 *
 * Allocates and returns a copy of the crtc state (both common and
 * vmw-specific) for the specified crtc.
 *
 * Returns: The newly allocated crtc state, or NULL on failure.
 */
struct drm_crtc_state *
vmw_du_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct drm_crtc_state *state;
	struct vmw_crtc_state *vcs;

	if (WARN_ON(!crtc->state))
		return NULL;

	vcs = kmemdup(crtc->state, sizeof(*vcs), GFP_KERNEL);

	if (!vcs)
		return NULL;

	state = &vcs->base;

	__drm_atomic_helper_crtc_duplicate_state(crtc, state);

	return state;
}


/**
 * vmw_du_crtc_reset - creates a blank vmw crtc state
 * @crtc: DRM crtc
 *
 * Resets the atomic state for @crtc by freeing the state pointer (which
 * might be NULL, e.g. at driver load time) and allocating a new empty state
 * object.
 */
void vmw_du_crtc_reset(struct drm_crtc *crtc)
{
	struct vmw_crtc_state *vcs;

	if (crtc->state) {
		__drm_atomic_helper_crtc_destroy_state(crtc->state);

		kfree(vmw_crtc_state_to_vcs(crtc->state));
	}

	vcs = kzalloc(sizeof(*vcs), GFP_KERNEL);

	if (!vcs) {
		DRM_ERROR("Cannot allocate vmw_crtc_state\n");
		return;
	}

	__drm_atomic_helper_crtc_reset(crtc, &vcs->base);
}


/**
 * vmw_du_crtc_destroy_state - destroy crtc state
 * @crtc: DRM crtc
 * @state: state object to destroy
 *
 * Destroys the crtc state (both common and vmw-specific) for the
 * specified crtc.
 */
void
vmw_du_crtc_destroy_state(struct drm_crtc *crtc,
			  struct drm_crtc_state *state)
{
	drm_atomic_helper_crtc_destroy_state(crtc, state);
}


/**
 * vmw_du_plane_duplicate_state - duplicate plane state
 * @plane: drm plane
 *
 * Allocates and returns a copy of the plane state (both common and
 * vmw-specific) for the specified plane.
 *
 * Returns: The newly allocated plane state, or NULL on failure.
 */
struct drm_plane_state *
vmw_du_plane_duplicate_state(struct drm_plane *plane)
{
	struct drm_plane_state *state;
	struct vmw_plane_state *vps;

	vps = kmemdup(plane->state, sizeof(*vps), GFP_KERNEL);

	if (!vps)
		return NULL;

	vps->pinned = 0;
	vps->cpp = 0;

	memset(&vps->cursor, 0, sizeof(vps->cursor));

	/* Each ref counted resource needs to be acquired again */
	if (vps->surf)
		(void) vmw_surface_reference(vps->surf);

	if (vps->bo)
		(void) vmw_bo_reference(vps->bo);

	state = &vps->base;

	__drm_atomic_helper_plane_duplicate_state(plane, state);

	return state;
}


/**
 * vmw_du_plane_reset - creates a blank vmw plane state
 * @plane: drm plane
 *
 * Resets the atomic state for @plane by freeing the state pointer (which might
 * be NULL, e.g. at driver load time) and allocating a new empty state object.
 */
void vmw_du_plane_reset(struct drm_plane *plane)
{
	struct vmw_plane_state *vps;

	if (plane->state)
		vmw_du_plane_destroy_state(plane, plane->state);

	vps = kzalloc(sizeof(*vps), GFP_KERNEL);

	if (!vps) {
		DRM_ERROR("Cannot allocate vmw_plane_state\n");
		return;
	}

	__drm_atomic_helper_plane_reset(plane, &vps->base);
}


/**
 * vmw_du_plane_destroy_state - destroy plane state
 * @plane: DRM plane
 * @state: state object to destroy
 *
 * Destroys the plane state (both common and vmw-specific) for the
 * specified plane.
 */
void
vmw_du_plane_destroy_state(struct drm_plane *plane,
			   struct drm_plane_state *state)
{
	struct vmw_plane_state *vps = vmw_plane_state_to_vps(state);

	/* Should have been freed by cleanup_fb */
	if (vps->surf)
		vmw_surface_unreference(&vps->surf);

	if (vps->bo)
		vmw_bo_unreference(&vps->bo);

	drm_atomic_helper_plane_destroy_state(plane, state);
}


/**
 * vmw_du_connector_duplicate_state - duplicate connector state
 * @connector: DRM connector
 *
 * Allocates and returns a copy of the connector state (both common and
 * vmw-specific) for the specified connector.
 *
 * Returns: The newly allocated connector state, or NULL on failure.
 */
struct drm_connector_state *
vmw_du_connector_duplicate_state(struct drm_connector *connector)
{
	struct drm_connector_state *state;
	struct vmw_connector_state *vcs;

	if (WARN_ON(!connector->state))
		return NULL;

	vcs = kmemdup(connector->state, sizeof(*vcs), GFP_KERNEL);

	if (!vcs)
		return NULL;

	state = &vcs->base;

	__drm_atomic_helper_connector_duplicate_state(connector, state);

	return state;
}


/**
 * vmw_du_connector_reset - creates a blank vmw connector state
 * @connector: DRM connector
 *
 * Resets the atomic state for @connector by freeing the state pointer (which
 * might be NULL, e.g. at driver load time) and allocating a new empty state
 * object.
 */
void vmw_du_connector_reset(struct drm_connector *connector)
{
	struct vmw_connector_state *vcs;

	if (connector->state) {
		__drm_atomic_helper_connector_destroy_state(connector->state);

		kfree(vmw_connector_state_to_vcs(connector->state));
	}

	vcs = kzalloc(sizeof(*vcs), GFP_KERNEL);

	if (!vcs) {
		DRM_ERROR("Cannot allocate vmw_connector_state\n");
		return;
	}

	__drm_atomic_helper_connector_reset(connector, &vcs->base);
}


/**
 * vmw_du_connector_destroy_state - destroy connector state
 * @connector: DRM connector
 * @state: state object to destroy
 *
 * Destroys the connector state (both common and vmw-specific) for the
 * specified connector.
 */
void
vmw_du_connector_destroy_state(struct drm_connector *connector,
			       struct drm_connector_state *state)
{
	drm_atomic_helper_connector_destroy_state(connector, state);
}

/*
 * Generic framebuffer code
 */

/*
 * Surface framebuffer code
 */

static void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
{
	struct vmw_framebuffer_surface *vfbs =
		vmw_framebuffer_to_vfbs(framebuffer);

	drm_framebuffer_cleanup(framebuffer);
	vmw_surface_unreference(&vfbs->surface);

	kfree(vfbs);
}

/**
 * vmw_kms_readback - Perform a readback from the screen system to
 * a buffer-object backed framebuffer.
 *
 * @dev_priv: Pointer to the device private structure.
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * Must be set to NULL if @user_fence_rep is NULL.
 * @vfb: Pointer to the buffer-object backed framebuffer.
 * @user_fence_rep: User-space provided structure for fence information.
 * Must be set to non-NULL if @file_priv is non-NULL.
 * @vclips: Array of clip rects.
 * @num_clips: Number of clip rects in @vclips.
 *
 * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
 * interrupted.
 */
int vmw_kms_readback(struct vmw_private *dev_priv,
		     struct drm_file *file_priv,
		     struct vmw_framebuffer *vfb,
		     struct drm_vmw_fence_rep __user *user_fence_rep,
		     struct drm_vmw_rect *vclips,
		     uint32_t num_clips)
{
	switch (dev_priv->active_display_unit) {
	case vmw_du_screen_object:
		return vmw_kms_sou_readback(dev_priv, file_priv, vfb,
					    user_fence_rep, vclips, num_clips,
					    NULL);
	case vmw_du_screen_target:
		return vmw_kms_stdu_readback(dev_priv, file_priv, vfb,
					     user_fence_rep, NULL, vclips, num_clips,
					     1, NULL);
	default:
		WARN_ONCE(true,
			  "Readback called with invalid display system.\n");
	}

	return -ENOSYS;
}


static const struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
	.destroy = vmw_framebuffer_surface_destroy,
	.dirty = drm_atomic_helper_dirtyfb,
};

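/*
 * Wrap a surface in a struct vmw_framebuffer_surface after validating
 * the pixel format, scanout flag and surface dimensions against the
 * user-supplied mode, taking a surface reference that is dropped again
 * on destroy.
 */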
static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
					   struct vmw_surface *surface,
					   struct vmw_framebuffer **out,
					   const struct drm_mode_fb_cmd2
					   *mode_cmd,
					   bool is_bo_proxy)
{
	struct drm_device *dev = &dev_priv->drm;
	struct vmw_framebuffer_surface *vfbs;
	enum SVGA3dSurfaceFormat format;
	int ret;

	/* 3D is only supported on HWv8 and newer hosts */
	if (dev_priv->active_display_unit == vmw_du_legacy)
		return -ENOSYS;

	/*
	 * Sanity checks.
	 */

	if (!drm_any_plane_has_format(&dev_priv->drm,
				      mode_cmd->pixel_format,
				      mode_cmd->modifier[0])) {
		drm_dbg(&dev_priv->drm,
			"unsupported pixel format %p4cc / modifier 0x%llx\n",
			&mode_cmd->pixel_format, mode_cmd->modifier[0]);
		return -EINVAL;
	}

	/* Surface must be marked as a scanout. */
	if (unlikely(!surface->metadata.scanout))
		return -EINVAL;

	if (unlikely(surface->metadata.mip_levels[0] != 1 ||
		     surface->metadata.num_sizes != 1 ||
		     surface->metadata.base_size.width < mode_cmd->width ||
		     surface->metadata.base_size.height < mode_cmd->height ||
		     surface->metadata.base_size.depth != 1)) {
		DRM_ERROR("Incompatible surface dimensions "
			  "for requested mode.\n");
		return -EINVAL;
	}

	switch (mode_cmd->pixel_format) {
	case DRM_FORMAT_ARGB8888:
		format = SVGA3D_A8R8G8B8;
		break;
	case DRM_FORMAT_XRGB8888:
		format = SVGA3D_X8R8G8B8;
		break;
	case DRM_FORMAT_RGB565:
		format = SVGA3D_R5G6B5;
		break;
	case DRM_FORMAT_XRGB1555:
		format = SVGA3D_A1R5G5B5;
		break;
	default:
		DRM_ERROR("Invalid pixel format: %p4cc\n",
			  &mode_cmd->pixel_format);
		return -EINVAL;
	}

	/*
	 * For DX, surface format validation is done when surface->scanout
	 * is set.
	 */
	if (!has_sm4_context(dev_priv) && format != surface->metadata.format) {
		DRM_ERROR("Invalid surface format for requested mode.\n");
		return -EINVAL;
	}

	vfbs = kzalloc(sizeof(*vfbs), GFP_KERNEL);
	if (!vfbs) {
		ret = -ENOMEM;
		goto out_err1;
	}

	drm_helper_mode_fill_fb_struct(dev, &vfbs->base.base, mode_cmd);
	vfbs->surface = vmw_surface_reference(surface);
	vfbs->base.user_handle = mode_cmd->handles[0];
	vfbs->is_bo_proxy = is_bo_proxy;

	*out = &vfbs->base;

	ret = drm_framebuffer_init(dev, &vfbs->base.base,
				   &vmw_framebuffer_surface_funcs);
	if (ret)
		goto out_err2;

	return 0;

out_err2:
	vmw_surface_unreference(&surface);
	kfree(vfbs);
out_err1:
	return ret;
}

/*
 * Buffer-object framebuffer code
 */

static int vmw_framebuffer_bo_create_handle(struct drm_framebuffer *fb,
					    struct drm_file *file_priv,
					    unsigned int *handle)
{
	struct vmw_framebuffer_bo *vfbd =
			vmw_framebuffer_to_vfbd(fb);

	return drm_gem_handle_create(file_priv, &vfbd->buffer->tbo.base, handle);
}

static void vmw_framebuffer_bo_destroy(struct drm_framebuffer *framebuffer)
{
	struct vmw_framebuffer_bo *vfbd =
		vmw_framebuffer_to_vfbd(framebuffer);

	drm_framebuffer_cleanup(framebuffer);
	vmw_bo_unreference(&vfbd->buffer);

	kfree(vfbd);
}

static const struct drm_framebuffer_funcs vmw_framebuffer_bo_funcs = {
	.create_handle = vmw_framebuffer_bo_create_handle,
	.destroy = vmw_framebuffer_bo_destroy,
	.dirty = drm_atomic_helper_dirtyfb,
};

/**
 * vmw_create_bo_proxy - create a proxy surface for the buffer object
 *
 * @dev: DRM device
 * @mode_cmd: parameters for the new surface
 * @bo_mob: MOB backing the buffer object
 * @srf_out: newly created surface
 *
 * When the content FB is a buffer object, we create a surface as a proxy to
 * the same buffer. This way we can do a surface copy rather than a surface
 * DMA, which is a more efficient approach.
 *
 * RETURNS:
 * 0 on success, error code otherwise
 */
static int vmw_create_bo_proxy(struct drm_device *dev,
			       const struct drm_mode_fb_cmd2 *mode_cmd,
			       struct vmw_bo *bo_mob,
			       struct vmw_surface **srf_out)
{
	struct vmw_surface_metadata metadata = {0};
	uint32_t format;
	struct vmw_resource *res;
	unsigned int bytes_pp;
	int ret;

	switch (mode_cmd->pixel_format) {
	case DRM_FORMAT_ARGB8888:
	case DRM_FORMAT_XRGB8888:
		format = SVGA3D_X8R8G8B8;
		bytes_pp = 4;
		break;

	case DRM_FORMAT_RGB565:
	case DRM_FORMAT_XRGB1555:
		format = SVGA3D_R5G6B5;
		bytes_pp = 2;
		break;

	case DRM_FORMAT_C8: /* 8-bit palettized */
		format = SVGA3D_P8;
		bytes_pp = 1;
		break;

	default:
		DRM_ERROR("Invalid framebuffer format %p4cc\n",
			  &mode_cmd->pixel_format);
		return -EINVAL;
	}

	metadata.format = format;
	metadata.mip_levels[0] = 1;
	metadata.num_sizes = 1;
	metadata.base_size.width = mode_cmd->pitches[0] / bytes_pp;
	metadata.base_size.height = mode_cmd->height;
	metadata.base_size.depth = 1;
	metadata.scanout = true;

	ret = vmw_gb_surface_define(vmw_priv(dev), &metadata, srf_out);
	if (ret) {
		DRM_ERROR("Failed to allocate proxy content buffer\n");
		return ret;
	}

	res = &(*srf_out)->res;

	/* Reserve and switch the backing mob. */
	mutex_lock(&res->dev_priv->cmdbuf_mutex);
	(void) vmw_resource_reserve(res, false, true);
	vmw_user_bo_unref(&res->guest_memory_bo);
	res->guest_memory_bo = vmw_user_bo_ref(bo_mob);
	res->guest_memory_offset = 0;
	vmw_resource_unreserve(res, false, false, false, NULL, 0);
	mutex_unlock(&res->dev_priv->cmdbuf_mutex);

	return 0;
}


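/*
 * Wrap a buffer object in a struct vmw_framebuffer_bo, after checking
 * that the object is large enough for the requested mode and that the
 * pixel format is supported, taking a buffer reference that is dropped
 * again on destroy.
 */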
static int vmw_kms_new_framebuffer_bo(struct vmw_private *dev_priv,
				      struct vmw_bo *bo,
				      struct vmw_framebuffer **out,
				      const struct drm_mode_fb_cmd2
				      *mode_cmd)
{
	struct drm_device *dev = &dev_priv->drm;
	struct vmw_framebuffer_bo *vfbd;
	unsigned int requested_size;
	int ret;

	requested_size = mode_cmd->height * mode_cmd->pitches[0];
	if (unlikely(requested_size > bo->tbo.base.size)) {
		DRM_ERROR("Screen buffer object size is too small "
			  "for requested mode.\n");
		return -EINVAL;
	}

	if (!drm_any_plane_has_format(&dev_priv->drm,
				      mode_cmd->pixel_format,
				      mode_cmd->modifier[0])) {
		drm_dbg(&dev_priv->drm,
			"unsupported pixel format %p4cc / modifier 0x%llx\n",
			&mode_cmd->pixel_format, mode_cmd->modifier[0]);
		return -EINVAL;
	}

	vfbd = kzalloc(sizeof(*vfbd), GFP_KERNEL);
	if (!vfbd) {
		ret = -ENOMEM;
		goto out_err1;
	}

	vfbd->base.base.obj[0] = &bo->tbo.base;
	drm_helper_mode_fill_fb_struct(dev, &vfbd->base.base, mode_cmd);
	vfbd->base.bo = true;
	vfbd->buffer = vmw_bo_reference(bo);
	vfbd->base.user_handle = mode_cmd->handles[0];
	*out = &vfbd->base;

	ret = drm_framebuffer_init(dev, &vfbd->base.base,
				   &vmw_framebuffer_bo_funcs);
	if (ret)
		goto out_err2;

	return 0;

out_err2:
	vmw_bo_unreference(&bo);
	kfree(vfbd);
out_err1:
	return ret;
}


1547 /**
1548  * vmw_kms_srf_ok - check if a surface can be created
1549  *
1550  * @dev_priv: Pointer to device private struct.
1551  * @width: requested width
1552  * @height: requested height
1553  *
1554  * Surfaces must not exceed the device's maximum texture size.
1555  */
1556 static bool
1557 vmw_kms_srf_ok(struct vmw_private *dev_priv, uint32_t width, uint32_t height)
1558 {
1559         if (width  > dev_priv->texture_max_width ||
1560             height > dev_priv->texture_max_height)
1561                 return false;
1562
1563         return true;
1564 }
1565
1566 /**
1567  * vmw_kms_new_framebuffer - Create a new framebuffer.
1568  *
1569  * @dev_priv: Pointer to device private struct.
1570  * @bo: Pointer to buffer object to wrap the kms framebuffer around.
1571  * Either @bo or @surface must be NULL.
1572  * @surface: Pointer to a surface to wrap the kms framebuffer around.
1573  * Either @bo or @surface must be NULL.
1574  * @only_2d: No presents will occur to this buffer object based framebuffer.
1575  * This helps the code to do some important optimizations.
1576  * @mode_cmd: Frame-buffer metadata.
1577  */
1578 struct vmw_framebuffer *
1579 vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
1580                         struct vmw_bo *bo,
1581                         struct vmw_surface *surface,
1582                         bool only_2d,
1583                         const struct drm_mode_fb_cmd2 *mode_cmd)
1584 {
1585         struct vmw_framebuffer *vfb = NULL;
1586         bool is_bo_proxy = false;
1587         int ret;
1588
1589         /*
1590          * We cannot use the SurfaceDMA command in a non-accelerated VM;
1591          * therefore, wrap the buffer object in a surface so we can use the
1592          * SurfaceCopy command.
1593          */
1594         if (vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height)  &&
1595             bo && only_2d &&
1596             mode_cmd->width > 64 &&  /* Don't create a proxy for cursor */
1597             dev_priv->active_display_unit == vmw_du_screen_target) {
1598                 ret = vmw_create_bo_proxy(&dev_priv->drm, mode_cmd,
1599                                           bo, &surface);
1600                 if (ret)
1601                         return ERR_PTR(ret);
1602
1603                 is_bo_proxy = true;
1604         }
1605
1606         /* Create the new framebuffer depending on what we have */
1607         if (surface) {
1608                 ret = vmw_kms_new_framebuffer_surface(dev_priv, surface, &vfb,
1609                                                       mode_cmd,
1610                                                       is_bo_proxy);
1611                 /*
1612                  * vmw_create_bo_proxy() adds a reference that is no longer
1613                  * needed
1614                  */
1615                 if (is_bo_proxy)
1616                         vmw_surface_unreference(&surface);
1617         } else if (bo) {
1618                 ret = vmw_kms_new_framebuffer_bo(dev_priv, bo, &vfb,
1619                                                  mode_cmd);
1620         } else {
1621                 BUG();
1622         }
1623
1624         if (ret)
1625                 return ERR_PTR(ret);
1626
1627         return vfb;
1628 }
1629
1630 /*
1631  * Generic Kernel modesetting functions
1632  */
1633
1634 static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
1635                                                  struct drm_file *file_priv,
1636                                                  const struct drm_mode_fb_cmd2 *mode_cmd)
1637 {
1638         struct vmw_private *dev_priv = vmw_priv(dev);
1639         struct vmw_framebuffer *vfb = NULL;
1640         struct vmw_surface *surface = NULL;
1641         struct vmw_bo *bo = NULL;
1642         int ret;
1643
1644         /* returns either a bo or surface */
1645         ret = vmw_user_lookup_handle(dev_priv, file_priv,
1646                                      mode_cmd->handles[0],
1647                                      &surface, &bo);
1648         if (ret) {
1649                 DRM_ERROR("Invalid buffer object handle %u (0x%x).\n",
1650                           mode_cmd->handles[0], mode_cmd->handles[0]);
1651                 goto err_out;
1652         }
1653
1655         if (!bo &&
1656             !vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height)) {
1657                 DRM_ERROR("Surface size cannot exceed %dx%d\n",
1658                         dev_priv->texture_max_width,
1659                         dev_priv->texture_max_height);
                     ret = -EINVAL;
1660                 goto err_out;
1661         }
1662
1664         vfb = vmw_kms_new_framebuffer(dev_priv, bo, surface,
1665                                       !(dev_priv->capabilities & SVGA_CAP_3D),
1666                                       mode_cmd);
1667         if (IS_ERR(vfb)) {
1668                 ret = PTR_ERR(vfb);
1669                 goto err_out;
1670         }
1671
1672 err_out:
1673         /* vmw_user_lookup_handle takes one ref; so does vmw_kms_new_framebuffer */
1674         if (bo)
1675                 vmw_user_bo_unref(&bo);
1676         if (surface)
1677                 vmw_surface_unreference(&surface);
1678
1679         if (ret) {
1680                 DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
1681                 return ERR_PTR(ret);
1682         }
1683
1684         return &vfb->base;
1685 }
1686
1687 /**
1688  * vmw_kms_check_display_memory - Validates display memory required for a
1689  * topology
1690  * @dev: DRM device
1691  * @num_rects: number of drm_rect in rects
1692  * @rects: array of drm_rect representing the topology to validate, indexed
1693  * by crtc index.
1694  *
1695  * Returns:
1696  * 0 on success otherwise negative error code
1697  */
1698 static int vmw_kms_check_display_memory(struct drm_device *dev,
1699                                         uint32_t num_rects,
1700                                         struct drm_rect *rects)
1701 {
1702         struct vmw_private *dev_priv = vmw_priv(dev);
1703         struct drm_rect bounding_box = {0};
1704         u64 total_pixels = 0, pixel_mem, bb_mem;
1705         int i;
1706
1707         for (i = 0; i < num_rects; i++) {
1708                 /*
1709                  * For STDU, only the individual screen (screen target) size is
1710                  * limited by the SCREENTARGET_MAX_WIDTH/HEIGHT registers.
1711                  */
1712                 if (dev_priv->active_display_unit == vmw_du_screen_target &&
1713                     (drm_rect_width(&rects[i]) > dev_priv->stdu_max_width ||
1714                      drm_rect_height(&rects[i]) > dev_priv->stdu_max_height)) {
1715                         VMW_DEBUG_KMS("Screen size not supported.\n");
1716                         return -EINVAL;
1717                 }
1718
1719                 /* Bounding box upper left is at (0,0). */
1720                 if (rects[i].x2 > bounding_box.x2)
1721                         bounding_box.x2 = rects[i].x2;
1722
1723                 if (rects[i].y2 > bounding_box.y2)
1724                         bounding_box.y2 = rects[i].y2;
1725
1726                 total_pixels += (u64) drm_rect_width(&rects[i]) *
1727                         (u64) drm_rect_height(&rects[i]);
1728         }
1729
1730         /* Virtual svga device primary limits are always in 32-bpp. */
1731         pixel_mem = total_pixels * 4;
1732
1733         /*
1734          * For HV10 and below, prim_bb_mem is the vram size. When
1735          * SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM is not present, the vram size
1736          * is the limit on the primary bounding box.
1737          */
1738         if (pixel_mem > dev_priv->max_primary_mem) {
1739                 VMW_DEBUG_KMS("Combined output size too large.\n");
1740                 return -EINVAL;
1741         }
1742
1743         /* SVGA_CAP_NO_BB_RESTRICTION is available for STDU only. */
1744         if (dev_priv->active_display_unit != vmw_du_screen_target ||
1745             !(dev_priv->capabilities & SVGA_CAP_NO_BB_RESTRICTION)) {
1746                 bb_mem = (u64) bounding_box.x2 * bounding_box.y2 * 4;
1747
1748                 if (bb_mem > dev_priv->max_primary_mem) {
1749                         VMW_DEBUG_KMS("Topology is beyond supported limits.\n");
1750                         return -EINVAL;
1751                 }
1752         }
1753
1754         return 0;
1755 }
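
/*
 * Worked example for vmw_kms_check_display_memory() (layout assumed for
 * illustration): two 1920x1080 outputs placed side by side give
 *
 *     total_pixels = 2 * 1920 * 1080 = 4147200
 *     pixel_mem    = total_pixels * 4 = 16588800 bytes
 *     bb_mem       = 3840 * 1080 * 4  = 16588800 bytes
 *
 * The same two outputs placed at (0,0) and (3840,0) leave pixel_mem
 * unchanged but grow the bounding box to 5760x1080, i.e.
 * bb_mem = 24883200 bytes, which is why the two limits are checked
 * separately.
 */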
1756
1757 /**
1758  * vmw_crtc_state_and_lock - Return new or current crtc state with locked
1759  * crtc mutex
1760  * @state: The atomic state pointer containing the new atomic state
1761  * @crtc: The crtc
1762  *
1763  * This function returns the new crtc state if it's part of the state update.
1764  * Otherwise returns the current crtc state. It also makes sure that the
1765  * crtc mutex is locked.
1766  *
1767  * Returns: A valid crtc state pointer or NULL. It may also return a
1768  * pointer error, in particular -EDEADLK if locking needs to be rerun.
1769  */
1770 static struct drm_crtc_state *
1771 vmw_crtc_state_and_lock(struct drm_atomic_state *state, struct drm_crtc *crtc)
1772 {
1773         struct drm_crtc_state *crtc_state;
1774
1775         crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
1776         if (crtc_state) {
1777                 lockdep_assert_held(&crtc->mutex.mutex.base);
1778         } else {
1779                 int ret = drm_modeset_lock(&crtc->mutex, state->acquire_ctx);
1780
1781                 if (ret != 0 && ret != -EALREADY)
1782                         return ERR_PTR(ret);
1783
1784                 crtc_state = crtc->state;
1785         }
1786
1787         return crtc_state;
1788 }
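
/*
 * A minimal caller sketch for vmw_crtc_state_and_lock() (loop context
 * assumed; see vmw_kms_check_implicit() below for a real user). -EDEADLK
 * must be propagated so that the atomic core can back off and rerun the
 * locking sequence:
 *
 *     crtc_state = vmw_crtc_state_and_lock(state, crtc);
 *     if (IS_ERR(crtc_state))
 *             return PTR_ERR(crtc_state);
 *     if (!crtc_state || !crtc_state->enable)
 *             continue;
 */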
1789
1790 /**
1791  * vmw_kms_check_implicit - Verify that all implicit display units scan out
1792  * from the same fb after the new state is committed.
1793  * @dev: The drm_device.
1794  * @state: The new state to be checked.
1795  *
1796  * Returns:
1797  *   Zero on success,
1798  *   -EINVAL on invalid state,
1799  *   -EDEADLK if modeset locking needs to be rerun.
1800  */
1801 static int vmw_kms_check_implicit(struct drm_device *dev,
1802                                   struct drm_atomic_state *state)
1803 {
1804         struct drm_framebuffer *implicit_fb = NULL;
1805         struct drm_crtc *crtc;
1806         struct drm_crtc_state *crtc_state;
1807         struct drm_plane_state *plane_state;
1808
1809         drm_for_each_crtc(crtc, dev) {
1810                 struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
1811
1812                 if (!du->is_implicit)
1813                         continue;
1814
1815                 crtc_state = vmw_crtc_state_and_lock(state, crtc);
1816                 if (IS_ERR(crtc_state))
1817                         return PTR_ERR(crtc_state);
1818
1819                 if (!crtc_state || !crtc_state->enable)
1820                         continue;
1821
1822                 /*
1823                  * Can't move primary planes across crtcs, so this is OK.
1824                  * It also means we don't need to take the plane mutex.
1825                  */
1826                 plane_state = du->primary.state;
1827                 if (plane_state->crtc != crtc)
1828                         continue;
1829
1830                 if (!implicit_fb)
1831                         implicit_fb = plane_state->fb;
1832                 else if (implicit_fb != plane_state->fb)
1833                         return -EINVAL;
1834         }
1835
1836         return 0;
1837 }
1838
1839 /**
1840  * vmw_kms_check_topology - Validates topology in drm_atomic_state
1841  * @dev: DRM device
1842  * @state: the driver state object
1843  *
1844  * Returns:
1845  * 0 on success otherwise negative error code
1846  */
1847 static int vmw_kms_check_topology(struct drm_device *dev,
1848                                   struct drm_atomic_state *state)
1849 {
1850         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
1851         struct drm_rect *rects;
1852         struct drm_crtc *crtc;
1853         uint32_t i;
1854         int ret = 0;
1855
1856         rects = kcalloc(dev->mode_config.num_crtc, sizeof(struct drm_rect),
1857                         GFP_KERNEL);
1858         if (!rects)
1859                 return -ENOMEM;
1860
1861         drm_for_each_crtc(crtc, dev) {
1862                 struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
1863                 struct drm_crtc_state *crtc_state;
1864
1865                 i = drm_crtc_index(crtc);
1866
1867                 crtc_state = vmw_crtc_state_and_lock(state, crtc);
1868                 if (IS_ERR(crtc_state)) {
1869                         ret = PTR_ERR(crtc_state);
1870                         goto clean;
1871                 }
1872
1873                 if (!crtc_state)
1874                         continue;
1875
1876                 if (crtc_state->enable) {
1877                         rects[i].x1 = du->gui_x;
1878                         rects[i].y1 = du->gui_y;
1879                         rects[i].x2 = du->gui_x + crtc_state->mode.hdisplay;
1880                         rects[i].y2 = du->gui_y + crtc_state->mode.vdisplay;
1881                 } else {
1882                         rects[i].x1 = 0;
1883                         rects[i].y1 = 0;
1884                         rects[i].x2 = 0;
1885                         rects[i].y2 = 0;
1886                 }
1887         }
1888
1889         /* Determine change to topology due to new atomic state */
1890         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
1891                                       new_crtc_state, i) {
1892                 struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
1893                 struct drm_connector *connector;
1894                 struct drm_connector_state *conn_state;
1895                 struct vmw_connector_state *vmw_conn_state;
1896
1897                 if (!du->pref_active && new_crtc_state->enable) {
1898                         VMW_DEBUG_KMS("Enabling a disabled display unit\n");
1899                         ret = -EINVAL;
1900                         goto clean;
1901                 }
1902
1903                 /*
1904                  * For vmwgfx each crtc has only one connector attached, and it
1905                  * never changes, so we don't really need to check
1906                  * crtc->connector_mask and iterate over it.
1907                  */
1908                 connector = &du->connector;
1909                 conn_state = drm_atomic_get_connector_state(state, connector);
1910                 if (IS_ERR(conn_state)) {
1911                         ret = PTR_ERR(conn_state);
1912                         goto clean;
1913                 }
1914
1915                 vmw_conn_state = vmw_connector_state_to_vcs(conn_state);
1916                 vmw_conn_state->gui_x = du->gui_x;
1917                 vmw_conn_state->gui_y = du->gui_y;
1918         }
1919
1920         ret = vmw_kms_check_display_memory(dev, dev->mode_config.num_crtc,
1921                                            rects);
1922
1923 clean:
1924         kfree(rects);
1925         return ret;
1926 }
1927
1928 /**
1929  * vmw_kms_atomic_check_modeset - validate state object for modeset changes
1930  *
1931  * @dev: DRM device
1932  * @state: the driver state object
1933  *
1934  * This is a simple wrapper around drm_atomic_helper_check() that also
1935  * validates the implicit framebuffer state and, whenever any crtc requires
1936  * a full modeset, the resulting display topology.
1937  *
1938  * Returns:
1939  * Zero for success or -errno
1940  */
1941 static int
1942 vmw_kms_atomic_check_modeset(struct drm_device *dev,
1943                              struct drm_atomic_state *state)
1944 {
1945         struct drm_crtc *crtc;
1946         struct drm_crtc_state *crtc_state;
1947         bool need_modeset = false;
1948         int i, ret;
1949
1950         ret = drm_atomic_helper_check(dev, state);
1951         if (ret)
1952                 return ret;
1953
1954         ret = vmw_kms_check_implicit(dev, state);
1955         if (ret) {
1956                 VMW_DEBUG_KMS("Invalid implicit state\n");
1957                 return ret;
1958         }
1959
1960         for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
1961                 if (drm_atomic_crtc_needs_modeset(crtc_state))
1962                         need_modeset = true;
1963         }
1964
1965         if (need_modeset)
1966                 return vmw_kms_check_topology(dev, state);
1967
1968         return ret;
1969 }
1970
1971 static const struct drm_mode_config_funcs vmw_kms_funcs = {
1972         .fb_create = vmw_kms_fb_create,
1973         .atomic_check = vmw_kms_atomic_check_modeset,
1974         .atomic_commit = drm_atomic_helper_commit,
1975 };
1976
1977 static int vmw_kms_generic_present(struct vmw_private *dev_priv,
1978                                    struct drm_file *file_priv,
1979                                    struct vmw_framebuffer *vfb,
1980                                    struct vmw_surface *surface,
1981                                    uint32_t sid,
1982                                    int32_t destX, int32_t destY,
1983                                    struct drm_vmw_rect *clips,
1984                                    uint32_t num_clips)
1985 {
1986         return vmw_kms_sou_do_surface_dirty(dev_priv, vfb, NULL, clips,
1987                                             &surface->res, destX, destY,
1988                                             num_clips, 1, NULL, NULL);
1989 }
1990
1992 int vmw_kms_present(struct vmw_private *dev_priv,
1993                     struct drm_file *file_priv,
1994                     struct vmw_framebuffer *vfb,
1995                     struct vmw_surface *surface,
1996                     uint32_t sid,
1997                     int32_t destX, int32_t destY,
1998                     struct drm_vmw_rect *clips,
1999                     uint32_t num_clips)
2000 {
2001         int ret;
2002
2003         switch (dev_priv->active_display_unit) {
2004         case vmw_du_screen_target:
2005                 ret = vmw_kms_stdu_surface_dirty(dev_priv, vfb, NULL, clips,
2006                                                  &surface->res, destX, destY,
2007                                                  num_clips, 1, NULL, NULL);
2008                 break;
2009         case vmw_du_screen_object:
2010                 ret = vmw_kms_generic_present(dev_priv, file_priv, vfb, surface,
2011                                               sid, destX, destY, clips,
2012                                               num_clips);
2013                 break;
2014         default:
2015                 WARN_ONCE(true,
2016                           "Present called with invalid display system.\n");
2017                 ret = -ENOSYS;
2018                 break;
2019         }
2020         if (ret)
2021                 return ret;
2022
2023         vmw_cmd_flush(dev_priv, false);
2024
2025         return 0;
2026 }
2027
2028 static void
2029 vmw_kms_create_hotplug_mode_update_property(struct vmw_private *dev_priv)
2030 {
2031         if (dev_priv->hotplug_mode_update_property)
2032                 return;
2033
2034         dev_priv->hotplug_mode_update_property =
2035                 drm_property_create_range(&dev_priv->drm,
2036                                           DRM_MODE_PROP_IMMUTABLE,
2037                                           "hotplug_mode_update", 0, 1);
2038 }
2039
2040 int vmw_kms_init(struct vmw_private *dev_priv)
2041 {
2042         struct drm_device *dev = &dev_priv->drm;
2043         int ret;
2044         static const char *display_unit_names[] = {
2045                 "Invalid",
2046                 "Legacy",
2047                 "Screen Object",
2048                 "Screen Target",
2049                 "Invalid (max)"
2050         };
2051
2052         drm_mode_config_init(dev);
2053         dev->mode_config.funcs = &vmw_kms_funcs;
2054         dev->mode_config.min_width = 1;
2055         dev->mode_config.min_height = 1;
2056         dev->mode_config.max_width = dev_priv->texture_max_width;
2057         dev->mode_config.max_height = dev_priv->texture_max_height;
2058         dev->mode_config.preferred_depth = dev_priv->assume_16bpp ? 16 : 32;
2059
2060         drm_mode_create_suggested_offset_properties(dev);
2061         vmw_kms_create_hotplug_mode_update_property(dev_priv);
2062
2063         ret = vmw_kms_stdu_init_display(dev_priv);
2064         if (ret) {
2065                 ret = vmw_kms_sou_init_display(dev_priv);
2066                 if (ret) /* Fallback */
2067                         ret = vmw_kms_ldu_init_display(dev_priv);
2068         }
2069         BUILD_BUG_ON(ARRAY_SIZE(display_unit_names) != (vmw_du_max + 1));
2070         drm_info(&dev_priv->drm, "%s display unit initialized\n",
2071                  display_unit_names[dev_priv->active_display_unit]);
2072
2073         return ret;
2074 }
2075
2076 int vmw_kms_close(struct vmw_private *dev_priv)
2077 {
2078         int ret = 0;
2079
2080         /*
2081          * Docs say we should take the lock before calling this function,
2082          * but since it destroys encoders, and our destructor calls
2083          * drm_encoder_cleanup(), which takes the lock, we would deadlock.
2084          */
2085         drm_mode_config_cleanup(&dev_priv->drm);
2086         if (dev_priv->active_display_unit == vmw_du_legacy)
2087                 ret = vmw_kms_ldu_close_display(dev_priv);
2088
2089         return ret;
2090 }
2091
2092 int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
2093                                 struct drm_file *file_priv)
2094 {
2095         struct drm_vmw_cursor_bypass_arg *arg = data;
2096         struct vmw_display_unit *du;
2097         struct drm_crtc *crtc;
2098         int ret = 0;
2099
2100         mutex_lock(&dev->mode_config.mutex);
2101         if (arg->flags & DRM_VMW_CURSOR_BYPASS_ALL) {
2103                 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2104                         du = vmw_crtc_to_du(crtc);
2105                         du->hotspot_x = arg->xhot;
2106                         du->hotspot_y = arg->yhot;
2107                 }
2108
2109                 mutex_unlock(&dev->mode_config.mutex);
2110                 return 0;
2111         }
2112
2113         crtc = drm_crtc_find(dev, file_priv, arg->crtc_id);
2114         if (!crtc) {
2115                 ret = -ENOENT;
2116                 goto out;
2117         }
2118
2119         du = vmw_crtc_to_du(crtc);
2120
2121         du->hotspot_x = arg->xhot;
2122         du->hotspot_y = arg->yhot;
2123
2124 out:
2125         mutex_unlock(&dev->mode_config.mutex);
2126
2127         return ret;
2128 }
2129
2130 int vmw_kms_write_svga(struct vmw_private *vmw_priv,
2131                         unsigned width, unsigned height, unsigned pitch,
2132                         unsigned bpp, unsigned depth)
2133 {
2134         if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
2135                 vmw_write(vmw_priv, SVGA_REG_PITCHLOCK, pitch);
2136         else if (vmw_fifo_have_pitchlock(vmw_priv))
2137                 vmw_fifo_mem_write(vmw_priv, SVGA_FIFO_PITCHLOCK, pitch);
2138         vmw_write(vmw_priv, SVGA_REG_WIDTH, width);
2139         vmw_write(vmw_priv, SVGA_REG_HEIGHT, height);
2140         if ((vmw_priv->capabilities & SVGA_CAP_8BIT_EMULATION) != 0)
2141                 vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, bpp);
2142
2143         if (vmw_read(vmw_priv, SVGA_REG_DEPTH) != depth) {
2144                 DRM_ERROR("Invalid depth %u for %u bpp, host expects %u\n",
2145                           depth, bpp, vmw_read(vmw_priv, SVGA_REG_DEPTH));
2146                 return -EINVAL;
2147         }
2148
2149         return 0;
2150 }
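
/*
 * Hypothetical usage sketch for vmw_kms_write_svga() (mode and depth values
 * assumed): the pitch is derived from the width and the bytes per pixel
 * before the registers are programmed.
 *
 *     unsigned int bpp = 32, depth = 24;
 *     unsigned int pitch = 1280 * (bpp / 8);  // 5120 bytes
 *
 *     ret = vmw_kms_write_svga(vmw_priv, 1280, 800, pitch, bpp, depth);
 */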
2151
2152 bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
2153                                 uint32_t pitch,
2154                                 uint32_t height)
2155 {
2156         return ((u64) pitch * (u64) height) < (u64)
2157                 ((dev_priv->active_display_unit == vmw_du_screen_target) ?
2158                  dev_priv->max_primary_mem : dev_priv->vram_size);
2159 }
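
/*
 * Worked example (4 bytes per pixel assumed): a 2560x1440 mode needs
 * pitch * height = (2560 * 4) * 1440 = 14745600 bytes, which
 * vmw_kms_validate_mode_vram() compares against max_primary_mem on screen
 * targets and against vram_size on the other display units.
 */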
2160
2161 /**
2162  * vmw_du_update_layout - Update the display unit with topology from resolution
2163  * plugin and generate DRM uevent
2164  * @dev_priv: device private
2165  * @num_rects: number of drm_rect in rects
2166  * @rects: topology to update
2167  */
2168 static int vmw_du_update_layout(struct vmw_private *dev_priv,
2169                                 unsigned int num_rects, struct drm_rect *rects)
2170 {
2171         struct drm_device *dev = &dev_priv->drm;
2172         struct vmw_display_unit *du;
2173         struct drm_connector *con;
2174         struct drm_connector_list_iter conn_iter;
2175         struct drm_modeset_acquire_ctx ctx;
2176         struct drm_crtc *crtc;
2177         int ret;
2178
2179         /* Currently gui_x/y are protected by the crtc mutex */
2180         mutex_lock(&dev->mode_config.mutex);
2181         drm_modeset_acquire_init(&ctx, 0);
2182 retry:
2183         drm_for_each_crtc(crtc, dev) {
2184                 ret = drm_modeset_lock(&crtc->mutex, &ctx);
2185                 if (ret < 0) {
2186                         if (ret == -EDEADLK) {
2187                                 drm_modeset_backoff(&ctx);
2188                                 goto retry;
2189                         }
2190                         goto out_fini;
2191                 }
2192         }
2193
2194         drm_connector_list_iter_begin(dev, &conn_iter);
2195         drm_for_each_connector_iter(con, &conn_iter) {
2196                 du = vmw_connector_to_du(con);
2197                 if (num_rects > du->unit) {
2198                         du->pref_width = drm_rect_width(&rects[du->unit]);
2199                         du->pref_height = drm_rect_height(&rects[du->unit]);
2200                         du->pref_active = true;
2201                         du->gui_x = rects[du->unit].x1;
2202                         du->gui_y = rects[du->unit].y1;
2203                 } else {
2204                         du->pref_width  = VMWGFX_MIN_INITIAL_WIDTH;
2205                         du->pref_height = VMWGFX_MIN_INITIAL_HEIGHT;
2206                         du->pref_active = false;
2207                         du->gui_x = 0;
2208                         du->gui_y = 0;
2209                 }
2210         }
2211         drm_connector_list_iter_end(&conn_iter);
2212
2213         list_for_each_entry(con, &dev->mode_config.connector_list, head) {
2214                 du = vmw_connector_to_du(con);
2215                 if (num_rects > du->unit) {
2216                         drm_object_property_set_value
2217                           (&con->base, dev->mode_config.suggested_x_property,
2218                            du->gui_x);
2219                         drm_object_property_set_value
2220                           (&con->base, dev->mode_config.suggested_y_property,
2221                            du->gui_y);
2222                 } else {
2223                         drm_object_property_set_value
2224                           (&con->base, dev->mode_config.suggested_x_property,
2225                            0);
2226                         drm_object_property_set_value
2227                           (&con->base, dev->mode_config.suggested_y_property,
2228                            0);
2229                 }
2230                 con->status = vmw_du_connector_detect(con, true);
2231         }
2232 out_fini:
2233         drm_modeset_drop_locks(&ctx);
2234         drm_modeset_acquire_fini(&ctx);
2235         mutex_unlock(&dev->mode_config.mutex);
2236
2237         drm_sysfs_hotplug_event(dev);
2238
2239         return 0;
2240 }
2241
2242 int vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
2243                           u16 *r, u16 *g, u16 *b,
2244                           uint32_t size,
2245                           struct drm_modeset_acquire_ctx *ctx)
2246 {
2247         struct vmw_private *dev_priv = vmw_priv(crtc->dev);
2248         int i;
2249
2250         for (i = 0; i < size; i++) {
2251                 DRM_DEBUG("%d r/g/b = 0x%04x / 0x%04x / 0x%04x\n", i,
2252                           r[i], g[i], b[i]);
2253                 vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 0, r[i] >> 8);
2254                 vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 1, g[i] >> 8);
2255                 vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 2, b[i] >> 8);
2256         }
2257
2258         return 0;
2259 }
2260
2261 int vmw_du_connector_dpms(struct drm_connector *connector, int mode)
2262 {
2263         return 0;
2264 }
2265
2266 enum drm_connector_status
2267 vmw_du_connector_detect(struct drm_connector *connector, bool force)
2268 {
2269         uint32_t num_displays;
2270         struct drm_device *dev = connector->dev;
2271         struct vmw_private *dev_priv = vmw_priv(dev);
2272         struct vmw_display_unit *du = vmw_connector_to_du(connector);
2273
2274         num_displays = vmw_read(dev_priv, SVGA_REG_NUM_DISPLAYS);
2275
2276         return ((du->unit < num_displays && du->pref_active) ?
2278                 connector_status_connected : connector_status_disconnected);
2279 }
2280
2281 static struct drm_display_mode vmw_kms_connector_builtin[] = {
2282         /* 640x480@60Hz */
2283         { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
2284                    752, 800, 0, 480, 489, 492, 525, 0,
2285                    DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
2286         /* 800x600@60Hz */
2287         { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
2288                    968, 1056, 0, 600, 601, 605, 628, 0,
2289                    DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
2290         /* 1024x768@60Hz */
2291         { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
2292                    1184, 1344, 0, 768, 771, 777, 806, 0,
2293                    DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
2294         /* 1152x864@75Hz */
2295         { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
2296                    1344, 1600, 0, 864, 865, 868, 900, 0,
2297                    DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
2298         /* 1280x720@60Hz */
2299         { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74500, 1280, 1344,
2300                    1472, 1664, 0, 720, 723, 728, 748, 0,
2301                    DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2302         /* 1280x768@60Hz */
2303         { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344,
2304                    1472, 1664, 0, 768, 771, 778, 798, 0,
2305                    DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2306         /* 1280x800@60Hz */
2307         { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352,
2308                    1480, 1680, 0, 800, 803, 809, 831, 0,
2309                    DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
2310         /* 1280x960@60Hz */
2311         { DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376,
2312                    1488, 1800, 0, 960, 961, 964, 1000, 0,
2313                    DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
2314         /* 1280x1024@60Hz */
2315         { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328,
2316                    1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
2317                    DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
2318         /* 1360x768@60Hz */
2319         { DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424,
2320                    1536, 1792, 0, 768, 771, 777, 795, 0,
2321                    DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
2322         /* 1400x1050@60Hz */
2323         { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488,
2324                    1632, 1864, 0, 1050, 1053, 1057, 1089, 0,
2325                    DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2326         /* 1440x900@60Hz */
2327         { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520,
2328                    1672, 1904, 0, 900, 903, 909, 934, 0,
2329                    DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2330         /* 1600x1200@60Hz */
2331         { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664,
2332                    1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
2333                    DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
2334         /* 1680x1050@60Hz */
2335         { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784,
2336                    1960, 2240, 0, 1050, 1053, 1059, 1089, 0,
2337                    DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2338         /* 1792x1344@60Hz */
2339         { DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920,
2340                    2120, 2448, 0, 1344, 1345, 1348, 1394, 0,
2341                    DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2342         /* 1856x1392@60Hz */
2343         { DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952,
2344                    2176, 2528, 0, 1392, 1393, 1396, 1439, 0,
2345                    DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2346         /* 1920x1080@60Hz */
2347         { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 173000, 1920, 2048,
2348                    2248, 2576, 0, 1080, 1083, 1088, 1120, 0,
2349                    DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2350         /* 1920x1200@60Hz */
2351         { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056,
2352                    2256, 2592, 0, 1200, 1203, 1209, 1245, 0,
2353                    DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2354         /* 1920x1440@60Hz */
2355         { DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048,
2356                    2256, 2600, 0, 1440, 1441, 1444, 1500, 0,
2357                    DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2358         /* 2560x1440@60Hz */
2359         { DRM_MODE("2560x1440", DRM_MODE_TYPE_DRIVER, 241500, 2560, 2608,
2360                    2640, 2720, 0, 1440, 1443, 1448, 1481, 0,
2361                    DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
2362         /* 2560x1600@60Hz */
2363         { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752,
2364                    3032, 3504, 0, 1600, 1603, 1609, 1658, 0,
2365                    DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2366         /* 2880x1800@60Hz */
2367         { DRM_MODE("2880x1800", DRM_MODE_TYPE_DRIVER, 337500, 2880, 2928,
2368                    2960, 3040, 0, 1800, 1803, 1809, 1852, 0,
2369                    DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
2370         /* 3840x2160@60Hz */
2371         { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 533000, 3840, 3888,
2372                    3920, 4000, 0, 2160, 2163, 2168, 2222, 0,
2373                    DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
2374         /* 3840x2400@60Hz */
2375         { DRM_MODE("3840x2400", DRM_MODE_TYPE_DRIVER, 592250, 3840, 3888,
2376                    3920, 4000, 0, 2400, 2403, 2409, 2469, 0,
2377                    DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
2378         /* Terminate */
2379         { DRM_MODE("", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) },
2380 };
2381
2382 /**
2383  * vmw_guess_mode_timing - Provide fake timings for a
2384  * 60Hz vrefresh mode.
2385  *
2386  * @mode: Pointer to a struct drm_display_mode with hdisplay and vdisplay
2387  * members filled in.
2388  */
2389 void vmw_guess_mode_timing(struct drm_display_mode *mode)
2390 {
2391         mode->hsync_start = mode->hdisplay + 50;
2392         mode->hsync_end = mode->hsync_start + 50;
2393         mode->htotal = mode->hsync_end + 50;
2394
2395         mode->vsync_start = mode->vdisplay + 50;
2396         mode->vsync_end = mode->vsync_start + 50;
2397         mode->vtotal = mode->vsync_end + 50;
2398
2399         mode->clock = (u32)mode->htotal * (u32)mode->vtotal / 100 * 6;
2400 }
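
/*
 * Worked example for vmw_guess_mode_timing() (mode size assumed): a
 * 1024x768 preferred mode gets
 *
 *     htotal = 1024 + 150 = 1174, vtotal = 768 + 150 = 918
 *     clock  = 1174 * 918 / 100 * 6 = 64662 kHz
 *
 * for a vrefresh of clock * 1000 / (htotal * vtotal) ~= 60 Hz, which is
 * all drm_calc_timestamping_constants() needs from these fake timings.
 */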
2401
2403 int vmw_du_connector_fill_modes(struct drm_connector *connector,
2404                                 uint32_t max_width, uint32_t max_height)
2405 {
2406         struct vmw_display_unit *du = vmw_connector_to_du(connector);
2407         struct drm_device *dev = connector->dev;
2408         struct vmw_private *dev_priv = vmw_priv(dev);
2409         struct drm_display_mode *mode = NULL;
2410         struct drm_display_mode *bmode;
2411         struct drm_display_mode prefmode = { DRM_MODE("preferred",
2412                 DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
2413                 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2414                 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
2415         };
2416         int i;
2417         u32 assumed_bpp = 4;
2418
2419         if (dev_priv->assume_16bpp)
2420                 assumed_bpp = 2;
2421
2422         max_width  = min(max_width,  dev_priv->texture_max_width);
2423         max_height = min(max_height, dev_priv->texture_max_height);
2424
2425         /*
2426          * For STDU, a mode is additionally limited by the
2427          * SVGA_REG_SCREENTARGET_MAX_WIDTH/HEIGHT registers.
2428          */
2429         if (dev_priv->active_display_unit == vmw_du_screen_target) {
2430                 max_width  = min(max_width,  dev_priv->stdu_max_width);
2431                 max_height = min(max_height, dev_priv->stdu_max_height);
2432         }
2433
2434         /* Add preferred mode */
2435         mode = drm_mode_duplicate(dev, &prefmode);
2436         if (!mode)
2437                 return 0;
2438         mode->hdisplay = du->pref_width;
2439         mode->vdisplay = du->pref_height;
2440         vmw_guess_mode_timing(mode);
2441         drm_mode_set_name(mode);
2442
2443         if (vmw_kms_validate_mode_vram(dev_priv,
2444                                         mode->hdisplay * assumed_bpp,
2445                                         mode->vdisplay)) {
2446                 drm_mode_probed_add(connector, mode);
2447         } else {
2448                 drm_mode_destroy(dev, mode);
2449                 mode = NULL;
2450         }
2451
2452         if (du->pref_mode) {
2453                 list_del_init(&du->pref_mode->head);
2454                 drm_mode_destroy(dev, du->pref_mode);
2455         }
2456
2457         /* mode might be NULL here; this is intended */
2458         du->pref_mode = mode;
2459
2460         for (i = 0; vmw_kms_connector_builtin[i].type != 0; i++) {
2461                 bmode = &vmw_kms_connector_builtin[i];
2462                 if (bmode->hdisplay > max_width ||
2463                     bmode->vdisplay > max_height)
2464                         continue;
2465
2466                 if (!vmw_kms_validate_mode_vram(dev_priv,
2467                                                 bmode->hdisplay * assumed_bpp,
2468                                                 bmode->vdisplay))
2469                         continue;
2470
2471                 mode = drm_mode_duplicate(dev, bmode);
2472                 if (!mode)
2473                         return 0;
2474
2475                 drm_mode_probed_add(connector, mode);
2476         }
2477
2478         drm_connector_list_update(connector);
2479         /* Move the preferred mode first, to help apps pick the right mode. */
2480         drm_mode_sort(&connector->modes);
2481
2482         return 1;
2483 }
2484
2485 /**
2486  * vmw_kms_update_layout_ioctl - Handler for DRM_VMW_UPDATE_LAYOUT ioctl
2487  * @dev: drm device for the ioctl
2488  * @data: data pointer for the ioctl
2489  * @file_priv: drm file for the ioctl call
2490  *
2491  * Update preferred topology of display unit as per ioctl request. The topology
2492  * is expressed as array of drm_vmw_rect.
2493  * e.g.
2494  * [0 0 640 480] [640 0 800 600] [0 480 640 480]
2495  *
2496  * NOTE:
2497  * The x and y offset (upper left) in drm_vmw_rect cannot be less than 0. Besides
2498  * the device limit on topology, x + w and y + h (lower right) cannot be greater
2499  * than INT_MAX. A topology beyond these limits will return an error.
2500  *
2501  * Returns:
2502  * Zero on success, negative errno on failure.
2503  */
2504 int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
2505                                 struct drm_file *file_priv)
2506 {
2507         struct vmw_private *dev_priv = vmw_priv(dev);
2508         struct drm_mode_config *mode_config = &dev->mode_config;
2509         struct drm_vmw_update_layout_arg *arg =
2510                 (struct drm_vmw_update_layout_arg *)data;
2511         void __user *user_rects;
2512         struct drm_vmw_rect *rects;
2513         struct drm_rect *drm_rects;
2514         unsigned rects_size;
2515         int ret, i;
2516
2517         if (!arg->num_outputs) {
2518                 struct drm_rect def_rect = {0, 0,
2519                                             VMWGFX_MIN_INITIAL_WIDTH,
2520                                             VMWGFX_MIN_INITIAL_HEIGHT};
2521                 vmw_du_update_layout(dev_priv, 1, &def_rect);
2522                 return 0;
2523         }
2524
2525         rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect);
2526         rects = kcalloc(arg->num_outputs, sizeof(struct drm_vmw_rect),
2527                         GFP_KERNEL);
2528         if (unlikely(!rects))
2529                 return -ENOMEM;
2530
2531         user_rects = (void __user *)(unsigned long)arg->rects;
2532         ret = copy_from_user(rects, user_rects, rects_size);
2533         if (unlikely(ret != 0)) {
2534                 DRM_ERROR("Failed to get rects.\n");
2535                 ret = -EFAULT;
2536                 goto out_free;
2537         }
2538
2539         drm_rects = (struct drm_rect *)rects;
2540
2541         VMW_DEBUG_KMS("Layout count = %u\n", arg->num_outputs);
2542         for (i = 0; i < arg->num_outputs; i++) {
2543                 struct drm_vmw_rect curr_rect;
2544
2545                 /* Verify user-space input for overflow, as the kernel uses drm_rect */
2546                 if ((rects[i].x + rects[i].w > INT_MAX) ||
2547                     (rects[i].y + rects[i].h > INT_MAX)) {
2548                         ret = -ERANGE;
2549                         goto out_free;
2550                 }
2551
2552                 curr_rect = rects[i];
2553                 drm_rects[i].x1 = curr_rect.x;
2554                 drm_rects[i].y1 = curr_rect.y;
2555                 drm_rects[i].x2 = curr_rect.x + curr_rect.w;
2556                 drm_rects[i].y2 = curr_rect.y + curr_rect.h;
2557
2558                 VMW_DEBUG_KMS("  x1 = %d y1 = %d x2 = %d y2 = %d\n",
2559                               drm_rects[i].x1, drm_rects[i].y1,
2560                               drm_rects[i].x2, drm_rects[i].y2);
2561
2562                 /*
2563                  * Currently this check limits the topology to
2564                  * mode_config->max (which is actually the maximum texture
2565                  * size supported by the virtual device). The limit is here
2566                  * to accommodate window managers that create one big
2567                  * framebuffer for the whole topology.
2568                  */
2569                 if (drm_rects[i].x1 < 0 ||  drm_rects[i].y1 < 0 ||
2570                     drm_rects[i].x2 > mode_config->max_width ||
2571                     drm_rects[i].y2 > mode_config->max_height) {
2572                         VMW_DEBUG_KMS("Invalid layout %d %d %d %d\n",
2573                                       drm_rects[i].x1, drm_rects[i].y1,
2574                                       drm_rects[i].x2, drm_rects[i].y2);
2575                         ret = -EINVAL;
2576                         goto out_free;
2577                 }
2578         }
2579
2580         ret = vmw_kms_check_display_memory(dev, arg->num_outputs, drm_rects);
2581
2582         if (ret == 0)
2583                 vmw_du_update_layout(dev_priv, arg->num_outputs, drm_rects);
2584
2585 out_free:
2586         kfree(rects);
2587         return ret;
2588 }
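
/*
 * A hedged user-space sketch of the topology this ioctl consumes (libdrm
 * call and file descriptor assumed); each drm_vmw_rect is (x, y, w, h) and
 * is converted above to a drm_rect with x2 = x + w and y2 = y + h:
 *
 *     struct drm_vmw_rect rects[2] = {
 *             { .x = 0,    .y = 0, .w = 1920, .h = 1080 },
 *             { .x = 1920, .y = 0, .w = 1280, .h = 1024 },
 *     };
 *     struct drm_vmw_update_layout_arg arg = {
 *             .num_outputs = 2,
 *             .rects = (unsigned long)rects,
 *     };
 *     drmCommandWrite(fd, DRM_VMW_UPDATE_LAYOUT, &arg, sizeof(arg));
 */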
2589
2590 /**
2591  * vmw_kms_helper_dirty - Helper to build commands and perform actions based
2592  * on a set of cliprects and a set of display units.
2593  *
2594  * @dev_priv: Pointer to a device private structure.
2595  * @framebuffer: Pointer to the framebuffer on which to perform the actions.
2596  * @clips: A set of struct drm_clip_rect. Either this or @vclips must be NULL.
2597  * Cliprects are given in framebuffer coordinates.
2598  * @vclips: A set of struct drm_vmw_rect cliprects. Either this or @clips must
2599  * be NULL. Cliprects are given in source coordinates.
2600  * @dest_x: X coordinate offset for the crtc / destination clip rects.
2601  * @dest_y: Y coordinate offset for the crtc / destination clip rects.
2602  * @num_clips: Number of cliprects in the @clips or @vclips array.
2603  * @increment: Integer with which to increment the clip counter when looping.
2604  * Used to skip a predetermined number of clip rects.
2605  * @dirty: Closure structure. See the description of struct vmw_kms_dirty.
2606  */
2607 int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
2608                          struct vmw_framebuffer *framebuffer,
2609                          const struct drm_clip_rect *clips,
2610                          const struct drm_vmw_rect *vclips,
2611                          s32 dest_x, s32 dest_y,
2612                          int num_clips,
2613                          int increment,
2614                          struct vmw_kms_dirty *dirty)
2615 {
2616         struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
2617         struct drm_crtc *crtc;
2618         u32 num_units = 0;
2619         u32 i, k;
2620
2621         dirty->dev_priv = dev_priv;
2622
2623         /* If crtc is passed, no need to iterate over other display units */
2624         if (dirty->crtc) {
2625                 units[num_units++] = vmw_crtc_to_du(dirty->crtc);
2626         } else {
2627                 list_for_each_entry(crtc, &dev_priv->drm.mode_config.crtc_list,
2628                                     head) {
2629                         struct drm_plane *plane = crtc->primary;
2630
2631                         if (plane->state->fb == &framebuffer->base)
2632                                 units[num_units++] = vmw_crtc_to_du(crtc);
2633                 }
2634         }
2635
2636         for (k = 0; k < num_units; k++) {
2637                 struct vmw_display_unit *unit = units[k];
2638                 s32 crtc_x = unit->crtc.x;
2639                 s32 crtc_y = unit->crtc.y;
2640                 s32 crtc_width = unit->crtc.mode.hdisplay;
2641                 s32 crtc_height = unit->crtc.mode.vdisplay;
2642                 const struct drm_clip_rect *clips_ptr = clips;
2643                 const struct drm_vmw_rect *vclips_ptr = vclips;
2644
2645                 dirty->unit = unit;
2646                 if (dirty->fifo_reserve_size > 0) {
2647                         dirty->cmd = VMW_CMD_RESERVE(dev_priv,
2648                                                       dirty->fifo_reserve_size);
2649                         if (!dirty->cmd)
2650                                 return -ENOMEM;
2651
2652                         memset(dirty->cmd, 0, dirty->fifo_reserve_size);
2653                 }
2654                 dirty->num_hits = 0;
2655                 for (i = 0; i < num_clips; i++, clips_ptr += increment,
2656                        vclips_ptr += increment) {
2657                         s32 clip_left;
2658                         s32 clip_top;
2659
2660                         /*
2661                          * Select clip array type. Note that integer type
2662                          * in @clips is unsigned short, whereas in @vclips
2663                          * it's 32-bit.
2664                          */
2665                         if (clips) {
2666                                 dirty->fb_x = (s32) clips_ptr->x1;
2667                                 dirty->fb_y = (s32) clips_ptr->y1;
2668                                 dirty->unit_x2 = (s32) clips_ptr->x2 + dest_x -
2669                                         crtc_x;
2670                                 dirty->unit_y2 = (s32) clips_ptr->y2 + dest_y -
2671                                         crtc_y;
2672                         } else {
2673                                 dirty->fb_x = vclips_ptr->x;
2674                                 dirty->fb_y = vclips_ptr->y;
2675                                 dirty->unit_x2 = dirty->fb_x + vclips_ptr->w +
2676                                         dest_x - crtc_x;
2677                                 dirty->unit_y2 = dirty->fb_y + vclips_ptr->h +
2678                                         dest_y - crtc_y;
2679                         }
2680
2681                         dirty->unit_x1 = dirty->fb_x + dest_x - crtc_x;
2682                         dirty->unit_y1 = dirty->fb_y + dest_y - crtc_y;
2683
2684                         /* Skip this clip if it's outside the crtc region */
2685                         if (dirty->unit_x1 >= crtc_width ||
2686                             dirty->unit_y1 >= crtc_height ||
2687                             dirty->unit_x2 <= 0 || dirty->unit_y2 <= 0)
2688                                 continue;
2689
2690                         /* Clip right and bottom to crtc limits */
2691                         dirty->unit_x2 = min_t(s32, dirty->unit_x2,
2692                                                crtc_width);
2693                         dirty->unit_y2 = min_t(s32, dirty->unit_y2,
2694                                                crtc_height);
2695
2696                         /* Clip left and top to crtc limits */
2697                         clip_left = min_t(s32, dirty->unit_x1, 0);
2698                         clip_top = min_t(s32, dirty->unit_y1, 0);
2699                         dirty->unit_x1 -= clip_left;
2700                         dirty->unit_y1 -= clip_top;
2701                         dirty->fb_x -= clip_left;
2702                         dirty->fb_y -= clip_top;
2703
2704                         dirty->clip(dirty);
2705                 }
2706
2707                 dirty->fifo_commit(dirty);
2708         }
2709
2710         return 0;
2711 }
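
/*
 * Worked example of the clip translation above (coordinates assumed): a
 * 100x50 framebuffer clip at (100, 50), presented with dest (0, 0) on a
 * unit whose crtc sits at (1920, 0), yields
 *
 *     unit_x1 = 100 + 0 - 1920 = -1820
 *     unit_x2 = 200 + 0 - 1920 = -1720
 *
 * so unit_x2 <= 0 and the clip is skipped for that unit, while on a unit
 * at (0, 0) it maps 1:1 and only the right/bottom edges are clamped to the
 * crtc dimensions.
 */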
2712
2713 /**
2714  * vmw_kms_helper_validation_finish - Helper for post KMS command submission
2715  * cleanup and fencing
2716  * @dev_priv: Pointer to the device-private struct
2717  * @file_priv: Pointer identifying the client when user-space fencing is used
2718  * @ctx: Pointer to the validation context
2719  * @out_fence: If non-NULL, returned refcounted fence-pointer
2720  * @user_fence_rep: If non-NULL, pointer to user-space address area
2721  * in which to copy user-space fence info
2722  */
2723 void vmw_kms_helper_validation_finish(struct vmw_private *dev_priv,
2724                                       struct drm_file *file_priv,
2725                                       struct vmw_validation_context *ctx,
2726                                       struct vmw_fence_obj **out_fence,
2727                                       struct drm_vmw_fence_rep __user *
2728                                       user_fence_rep)
2729 {
2730         struct vmw_fence_obj *fence = NULL;
2731         uint32_t handle = 0;
2732         int ret = 0;
2733
2734         if (file_priv || user_fence_rep || vmw_validation_has_bos(ctx) ||
2735             out_fence)
2736                 ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
2737                                                  file_priv ? &handle : NULL);
2738         vmw_validation_done(ctx, fence);
2739         if (file_priv)
2740                 vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv),
2741                                             ret, user_fence_rep, fence,
2742                                             handle, -1);
2743         if (out_fence)
2744                 *out_fence = fence;
2745         else
2746                 vmw_fence_obj_unreference(&fence);
2747 }
2748
2749 /**
2750  * vmw_kms_update_proxy - Helper function to update a proxy surface from
2751  * its backing MOB.
2752  *
2753  * @res: Pointer to the surface resource
2754  * @clips: Clip rects in framebuffer (surface) space.
2755  * @num_clips: Number of clips in @clips.
2756  * @increment: Integer with which to increment the clip counter when looping.
2757  * Used to skip a predetermined number of clip rects.
2758  *
2759  * This function makes sure the proxy surface is updated from its backing MOB
2760  * using the region given by @clips. The surface resource @res and its backing
2761  * MOB need to be reserved and validated on call.
2762  */
2763 int vmw_kms_update_proxy(struct vmw_resource *res,
2764                          const struct drm_clip_rect *clips,
2765                          unsigned num_clips,
2766                          int increment)
2767 {
2768         struct vmw_private *dev_priv = res->dev_priv;
2769         struct drm_vmw_size *size = &vmw_res_to_srf(res)->metadata.base_size;
2770         struct {
2771                 SVGA3dCmdHeader header;
2772                 SVGA3dCmdUpdateGBImage body;
2773         } *cmd;
2774         SVGA3dBox *box;
2775         size_t copy_size = 0;
2776         int i;
2777
2778         if (!clips)
2779                 return 0;
2780
2781         cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd) * num_clips);
2782         if (!cmd)
2783                 return -ENOMEM;
2784
2785         for (i = 0; i < num_clips; ++i, clips += increment, ++cmd) {
2786                 box = &cmd->body.box;
2787
2788                 cmd->header.id = SVGA_3D_CMD_UPDATE_GB_IMAGE;
2789                 cmd->header.size = sizeof(cmd->body);
2790                 cmd->body.image.sid = res->id;
2791                 cmd->body.image.face = 0;
2792                 cmd->body.image.mipmap = 0;
2793
2794                 if (clips->x1 > size->width || clips->x2 > size->width ||
2795                     clips->y1 > size->height || clips->y2 > size->height) {
2796                         DRM_ERROR("Invalid clips outside of framebuffer.\n");
2797                         return -EINVAL;
2798                 }
2799
2800                 box->x = clips->x1;
2801                 box->y = clips->y1;
2802                 box->z = 0;
2803                 box->w = clips->x2 - clips->x1;
2804                 box->h = clips->y2 - clips->y1;
2805                 box->d = 1;
2806
2807                 copy_size += sizeof(*cmd);
2808         }
2809
2810         vmw_cmd_commit(dev_priv, copy_size);
2811
2812         return 0;
2813 }
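
/*
 * Worked example of the clip-to-box conversion above (clip assumed): a
 * drm_clip_rect of (x1, y1, x2, y2) = (16, 16, 48, 48) becomes an
 * SVGA3dBox of
 *
 *     x = 16, y = 16, z = 0, w = 32, h = 32, d = 1
 *
 * since boxes are origin plus extent while clip rects are two corners.
 */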
2814
2815 /**
2816  * vmw_kms_create_implicit_placement_property - Set up the implicit placement
2817  * property.
2818  *
2819  * @dev_priv: Pointer to a device private struct.
2820  *
2821  * Sets up the implicit placement property unless it's already set up.
2822  */
2823 void
2824 vmw_kms_create_implicit_placement_property(struct vmw_private *dev_priv)
2825 {
2826         if (dev_priv->implicit_placement_property)
2827                 return;
2828
2829         dev_priv->implicit_placement_property =
2830                 drm_property_create_range(&dev_priv->drm,
2831                                           DRM_MODE_PROP_IMMUTABLE,
2832                                           "implicit_placement", 0, 1);
2833 }
2834
2835 /**
2836  * vmw_kms_suspend - Save modesetting state and turn modesetting off.
2837  *
2838  * @dev: Pointer to the drm device
2839  * Return: 0 on success. Negative error code on failure.
2840  */
2841 int vmw_kms_suspend(struct drm_device *dev)
2842 {
2843         struct vmw_private *dev_priv = vmw_priv(dev);
2844
2845         dev_priv->suspend_state = drm_atomic_helper_suspend(dev);
2846         if (IS_ERR(dev_priv->suspend_state)) {
2847                 int ret = PTR_ERR(dev_priv->suspend_state);
2848
2849                 DRM_ERROR("Failed kms suspend: %d\n", ret);
2850                 dev_priv->suspend_state = NULL;
2851
2852                 return ret;
2853         }
2854
2855         return 0;
2856 }
2857
2859 /**
2860  * vmw_kms_resume - Re-enable modesetting and restore state
2861  *
2862  * @dev: Pointer to the drm device
2863  * Return: 0 on success. Negative error code on failure.
2864  *
2865  * State is resumed from a previous vmw_kms_suspend(). It's illegal
2866  * to call this function without a previous vmw_kms_suspend().
2867  */
2868 int vmw_kms_resume(struct drm_device *dev)
2869 {
2870         struct vmw_private *dev_priv = vmw_priv(dev);
2871         int ret;
2872
2873         if (WARN_ON(!dev_priv->suspend_state))
2874                 return 0;
2875
2876         ret = drm_atomic_helper_resume(dev, dev_priv->suspend_state);
2877         dev_priv->suspend_state = NULL;
2878
2879         return ret;
2880 }
2881
2882 /**
2883  * vmw_kms_lost_device - Notify kms that modesetting capabilities will be lost
2884  *
2885  * @dev: Pointer to the drm device
2886  */
2887 void vmw_kms_lost_device(struct drm_device *dev)
2888 {
2889         drm_atomic_helper_shutdown(dev);
2890 }
2891
2892 /**
2893  * vmw_du_helper_plane_update - Helper to do plane update on a display unit.
2894  * @update: The closure structure.
2895  *
2896  * Call this helper after setting the callbacks in &vmw_du_update_plane to
2897  * perform a plane update on the display unit.
2898  *
2899  * Return: 0 on success or a negative error code on failure.
2900  */
2901 int vmw_du_helper_plane_update(struct vmw_du_update_plane *update)
2902 {
2903         struct drm_plane_state *state = update->plane->state;
2904         struct drm_plane_state *old_state = update->old_state;
2905         struct drm_atomic_helper_damage_iter iter;
2906         struct drm_rect clip;
2907         struct drm_rect bb;
2908         DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
2909         uint32_t reserved_size = 0;
2910         uint32_t submit_size = 0;
2911         uint32_t curr_size = 0;
2912         uint32_t num_hits = 0;
2913         void *cmd_start;
2914         char *cmd_next;
2915         int ret;
2916
2917         /*
2918          * Iterate in advance to check whether a plane update is really needed and
2919          * to count the clips that fall within the plane src, for FIFO allocation.
2920          */
2921         drm_atomic_helper_damage_iter_init(&iter, old_state, state);
2922         drm_atomic_for_each_plane_damage(&iter, &clip)
2923                 num_hits++;
2924
2925         if (num_hits == 0)
2926                 return 0;
2927
2928         if (update->vfb->bo) {
2929                 struct vmw_framebuffer_bo *vfbbo =
2930                         container_of(update->vfb, typeof(*vfbbo), base);
2931
2932                 /*
2933                  * For screen targets we want a mappable bo; for everything else we
2934                  * want an accelerated, i.e. host-backed (vram or gmr), bo. If the
2935                  * display unit is not a screen target then MOBs shouldn't be available.
2936                  */
2937                 if (update->dev_priv->active_display_unit == vmw_du_screen_target) {
2938                         vmw_bo_placement_set(vfbbo->buffer,
2939                                              VMW_BO_DOMAIN_SYS | VMW_BO_DOMAIN_MOB | VMW_BO_DOMAIN_GMR,
2940                                              VMW_BO_DOMAIN_SYS | VMW_BO_DOMAIN_MOB | VMW_BO_DOMAIN_GMR);
2941                 } else {
2942                         WARN_ON(update->dev_priv->has_mob);
2943                         vmw_bo_placement_set_default_accelerated(vfbbo->buffer);
2944                 }
2945                 ret = vmw_validation_add_bo(&val_ctx, vfbbo->buffer);
2946         } else {
2947                 struct vmw_framebuffer_surface *vfbs =
2948                         container_of(update->vfb, typeof(*vfbs), base);
2949
2950                 ret = vmw_validation_add_resource(&val_ctx, &vfbs->surface->res,
2951                                                   0, VMW_RES_DIRTY_NONE, NULL,
2952                                                   NULL);
2953         }
2954
2955         if (ret)
2956                 return ret;
2957
2958         ret = vmw_validation_prepare(&val_ctx, update->mutex, update->intr);
2959         if (ret)
2960                 goto out_unref;
2961
2962         reserved_size = update->calc_fifo_size(update, num_hits);
2963         cmd_start = VMW_CMD_RESERVE(update->dev_priv, reserved_size);
2964         if (!cmd_start) {
2965                 ret = -ENOMEM;
2966                 goto out_revert;
2967         }
2968
2969         cmd_next = cmd_start;
2970
2971         if (update->post_prepare) {
2972                 curr_size = update->post_prepare(update, cmd_next);
2973                 cmd_next += curr_size;
2974                 submit_size += curr_size;
2975         }
2976
2977         if (update->pre_clip) {
2978                 curr_size = update->pre_clip(update, cmd_next, num_hits);
2979                 cmd_next += curr_size;
2980                 submit_size += curr_size;
2981         }
2982
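             /*
              * Start with an inverted, "empty" bounding box (min > max) so the
              * first damage clip initializes it and later clips only grow it.
              */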
2983         bb.x1 = INT_MAX;
2984         bb.y1 = INT_MAX;
2985         bb.x2 = INT_MIN;
2986         bb.y2 = INT_MIN;
2987
2988         drm_atomic_helper_damage_iter_init(&iter, old_state, state);
2989         drm_atomic_for_each_plane_damage(&iter, &clip) {
2990                 uint32_t fb_x = clip.x1;
2991                 uint32_t fb_y = clip.y1;
2992
2993                 vmw_du_translate_to_crtc(state, &clip);
2994                 if (update->clip) {
2995                         curr_size = update->clip(update, cmd_next, &clip, fb_x,
2996                                                  fb_y);
2997                         cmd_next += curr_size;
2998                         submit_size += curr_size;
2999                 }
3000                 bb.x1 = min_t(int, bb.x1, clip.x1);
3001                 bb.y1 = min_t(int, bb.y1, clip.y1);
3002                 bb.x2 = max_t(int, bb.x2, clip.x2);
3003                 bb.y2 = max_t(int, bb.y2, clip.y2);
3004         }
3005
3006         curr_size = update->post_clip(update, cmd_next, &bb);
3007         submit_size += curr_size;
3008
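             /*
              * If the callbacks wrote more than was reserved, committing it
              * would overrun the reservation; commit zero bytes instead so
              * the FIFO stays consistent.
              */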
3009         if (reserved_size < submit_size)
3010                 submit_size = 0;
3011
3012         vmw_cmd_commit(update->dev_priv, submit_size);
3013
3014         vmw_kms_helper_validation_finish(update->dev_priv, NULL, &val_ctx,
3015                                          update->out_fence, NULL);
3016         return ret;
3017
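     /* Error unwind: revert the validation transaction, then drop list refs. */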
3018 out_revert:
3019         vmw_validation_revert(&val_ctx);
3020
3021 out_unref:
3022         vmw_validation_unref_lists(&val_ctx);
3023         return ret;
3024 }
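
     /*
      * Hedged usage sketch for the helper above: a display unit fills in the
      * closure's callbacks before calling it.  Every "xdu"-prefixed name below
      * is a hypothetical stand-in, not a function defined in this driver.
      * calc_fifo_size and post_clip are invoked unconditionally by the helper,
      * so those two callbacks must be set; the others may be left NULL.
      */
     static int vmw_xdu_plane_update_sketch(struct vmw_du_update_plane *update)
     {
             update->calc_fifo_size = vmw_xdu_calc_fifo_size; /* required */
             update->post_prepare = NULL;                     /* optional */
             update->pre_clip = vmw_xdu_pre_clip;             /* optional */
             update->clip = vmw_xdu_clip;                     /* optional */
             update->post_clip = vmw_xdu_post_clip;           /* required */

             return vmw_du_helper_plane_update(update);
     }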