// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include "vmwgfx_kms.h"

#include "vmwgfx_bo.h"
#include "vmw_surface_cache.h"

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_rect.h>
#include <drm/drm_sysfs.h>
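/**
 * vmw_du_cleanup - Clean up the DRM objects that make up a display unit
 * @du: display unit to clean up
 *
 * Releases the primary plane, the cursor plane (only initialized on
 * devices with command support), and the connector, crtc and encoder
 * belonging to @du.
 */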
void vmw_du_cleanup(struct vmw_display_unit *du)
{
	struct vmw_private *dev_priv = vmw_priv(du->primary.dev);

	drm_plane_cleanup(&du->primary);
	if (vmw_cmd_supported(dev_priv))
		drm_plane_cleanup(&du->cursor.base);

	drm_connector_unregister(&du->connector);
	drm_crtc_cleanup(&du->crtc);
	drm_encoder_cleanup(&du->encoder);
	drm_connector_cleanup(&du->connector);
}
/*
 * Display Unit Cursor functions
 */
static int vmw_du_cursor_plane_unmap_cm(struct vmw_plane_state *vps);
static void vmw_cursor_update_mob(struct vmw_private *dev_priv,
				  struct vmw_plane_state *vps,
				  u32 *image, u32 width, u32 height,
				  u32 hotspotX, u32 hotspotY);
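/*
 * FIFO layout of the define-alpha-cursor command: a 32-bit command id
 * immediately followed by the cursor definition; the pixel data is
 * appended right after this header.
 */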
struct vmw_svga_fifo_cmd_define_cursor {
	u32 cmd;
	SVGAFifoCmdDefineAlphaCursor cursor;
};
/**
 * vmw_send_define_cursor_cmd - queue a define cursor command
 * @dev_priv: the private driver struct
 * @image: buffer which holds the cursor image
 * @width: width of the mouse cursor image
 * @height: height of the mouse cursor image
 * @hotspotX: the horizontal position of mouse hotspot
 * @hotspotY: the vertical position of mouse hotspot
 */
static void vmw_send_define_cursor_cmd(struct vmw_private *dev_priv,
				       u32 *image, u32 width, u32 height,
				       u32 hotspotX, u32 hotspotY)
{
	struct vmw_svga_fifo_cmd_define_cursor *cmd;
	const u32 image_size = width * height * sizeof(*image);
	const u32 cmd_size = sizeof(*cmd) + image_size;

	/*
	 * Try to reserve fifocmd space and swallow any failures;
	 * such reservations cannot be left unconsumed for long
	 * under the risk of clogging other fifocmd users, so
	 * we treat reservations separately from the way we treat
	 * other fallible KMS-atomic resources at prepare_fb.
	 */
	cmd = VMW_CMD_RESERVE(dev_priv, cmd_size);

	if (unlikely(!cmd))
		return;

	memset(cmd, 0, sizeof(*cmd));

	memcpy(&cmd[1], image, image_size);

	cmd->cmd = SVGA_CMD_DEFINE_ALPHA_CURSOR;
	cmd->cursor.id = SVGA3D_INVALID_ID;
	cmd->cursor.width = width;
	cmd->cursor.height = height;
	cmd->cursor.hotspotX = hotspotX;
	cmd->cursor.hotspotY = hotspotY;

	vmw_cmd_commit_flush(dev_priv, cmd_size);
}
/**
 * vmw_cursor_update_image - update the cursor image on the provided plane
 * @dev_priv: the private driver struct
 * @vps: the plane state of the cursor plane
 * @image: buffer which holds the cursor image
 * @width: width of the mouse cursor image
 * @height: height of the mouse cursor image
 * @hotspotX: the horizontal position of mouse hotspot
 * @hotspotY: the vertical position of mouse hotspot
 */
static void vmw_cursor_update_image(struct vmw_private *dev_priv,
				    struct vmw_plane_state *vps,
				    u32 *image, u32 width, u32 height,
				    u32 hotspotX, u32 hotspotY)
{
	if (vps->cursor.bo)
		vmw_cursor_update_mob(dev_priv, vps, image,
				      vps->base.crtc_w, vps->base.crtc_h,
				      hotspotX, hotspotY);
	else
		vmw_send_define_cursor_cmd(dev_priv, image, width, height,
					   hotspotX, hotspotY);
}
/**
 * vmw_cursor_update_mob - Update cursor via CursorMob mechanism
 *
 * Called from inside vmw_du_cursor_plane_atomic_update to actually
 * make the cursor-image live.
 *
 * @dev_priv: device to work with
 * @vps: the plane state of the cursor plane
 * @image: cursor source data to fill the MOB with
 * @width: source data width
 * @height: source data height
 * @hotspotX: cursor hotspot x
 * @hotspotY: cursor hotspot y
 */
static void vmw_cursor_update_mob(struct vmw_private *dev_priv,
				  struct vmw_plane_state *vps,
				  u32 *image, u32 width, u32 height,
				  u32 hotspotX, u32 hotspotY)
{
	SVGAGBCursorHeader *header;
	SVGAGBAlphaCursorHeader *alpha_header;
	const u32 image_size = width * height * sizeof(*image);

	header = vmw_bo_map_and_cache(vps->cursor.bo);
	alpha_header = &header->header.alphaHeader;

	memset(header, 0, sizeof(*header));

	header->type = SVGA_ALPHA_CURSOR;
	header->sizeInBytes = image_size;

	alpha_header->hotspotX = hotspotX;
	alpha_header->hotspotY = hotspotY;
	alpha_header->width = width;
	alpha_header->height = height;

	memcpy(header + 1, image, image_size);
	vmw_write(dev_priv, SVGA_REG_CURSOR_MOBID,
		  vps->cursor.bo->tbo.resource->start);
}
static u32 vmw_du_cursor_mob_size(u32 w, u32 h)
{
	return w * h * sizeof(u32) + sizeof(SVGAGBCursorHeader);
}
/**
 * vmw_du_cursor_plane_acquire_image -- Acquire the image data
 * @vps: cursor plane state
 */
static u32 *vmw_du_cursor_plane_acquire_image(struct vmw_plane_state *vps)
{
	bool is_iomem;

	if (vps->surf) {
		if (vps->surf_mapped)
			return vmw_bo_map_and_cache(vps->surf->res.guest_memory_bo);
		return vps->surf->snooper.image;
	} else if (vps->bo)
		return ttm_kmap_obj_virtual(&vps->bo->map, &is_iomem);

	return NULL;
}
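/**
 * vmw_du_cursor_plane_has_changed - Check whether the cursor needs re-defining
 * @old_vps: previous state of the cursor plane
 * @new_vps: new state of the cursor plane
 *
 * Compares the crtc dimensions, the hotspots and, when both images can be
 * acquired, the image bytes themselves. Returns true if any of them differ.
 */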
static bool vmw_du_cursor_plane_has_changed(struct vmw_plane_state *old_vps,
					    struct vmw_plane_state *new_vps)
{
	void *old_image;
	void *new_image;
	u32 size;
	bool changed;

	if (old_vps->base.crtc_w != new_vps->base.crtc_w ||
	    old_vps->base.crtc_h != new_vps->base.crtc_h)
		return true;

	if (old_vps->cursor.hotspot_x != new_vps->cursor.hotspot_x ||
	    old_vps->cursor.hotspot_y != new_vps->cursor.hotspot_y)
		return true;

	size = new_vps->base.crtc_w * new_vps->base.crtc_h * sizeof(u32);

	old_image = vmw_du_cursor_plane_acquire_image(old_vps);
	new_image = vmw_du_cursor_plane_acquire_image(new_vps);

	changed = false;
	if (old_image && new_image)
		changed = memcmp(old_image, new_image, size) != 0;

	return changed;
}
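/**
 * vmw_du_destroy_cursor_mob - Unpin and unreference a cursor MOB
 * @vbo: cursor buffer object; may point to NULL, in which case this is a no-op
 */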
static void vmw_du_destroy_cursor_mob(struct vmw_bo **vbo)
{
	if (!(*vbo))
		return;

	ttm_bo_unpin(&(*vbo)->tbo);
	vmw_bo_unreference(vbo);
}
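/**
 * vmw_du_put_cursor_mob - Return a cursor MOB to the per-plane cache
 * @vcp: cursor plane that owns the MOB cache
 * @vps: plane state currently holding the MOB
 *
 * Unmaps the MOB and stashes it in a free cache slot. When the cache is
 * full the MOB replaces a smaller cached entry, and it is destroyed
 * outright if it is not worth caching.
 */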
static void vmw_du_put_cursor_mob(struct vmw_cursor_plane *vcp,
				  struct vmw_plane_state *vps)
{
	u32 i;

	if (!vps->cursor.bo)
		return;

	vmw_du_cursor_plane_unmap_cm(vps);

	/* Look for a free slot to return this mob to the cache. */
	for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
		if (!vcp->cursor_mobs[i]) {
			vcp->cursor_mobs[i] = vps->cursor.bo;
			vps->cursor.bo = NULL;
			return;
		}
	}

	/* Cache is full: See if this mob is bigger than an existing mob. */
	for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
		if (vcp->cursor_mobs[i]->tbo.base.size <
		    vps->cursor.bo->tbo.base.size) {
			vmw_du_destroy_cursor_mob(&vcp->cursor_mobs[i]);
			vcp->cursor_mobs[i] = vps->cursor.bo;
			vps->cursor.bo = NULL;
			return;
		}
	}

	/* Destroy it if it's not worth caching. */
	vmw_du_destroy_cursor_mob(&vps->cursor.bo);
}
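/**
 * vmw_du_get_cursor_mob - Acquire a cursor MOB for a plane state
 * @vcp: cursor plane that owns the MOB cache
 * @vps: plane state that needs a MOB
 *
 * Checks the requested size against the device limits, then reuses the MOB
 * already attached to @vps or a large-enough cached one, and only creates
 * (and fences) a new MOB as a last resort.
 */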
static int vmw_du_get_cursor_mob(struct vmw_cursor_plane *vcp,
				 struct vmw_plane_state *vps)
{
	struct vmw_private *dev_priv = vcp->base.dev->dev_private;
	u32 size = vmw_du_cursor_mob_size(vps->base.crtc_w, vps->base.crtc_h);
	u32 i;
	u32 cursor_max_dim, mob_max_size;
	int ret;

	if (!dev_priv->has_mob ||
	    (dev_priv->capabilities2 & SVGA_CAP2_CURSOR_MOB) == 0)
		return -EINVAL;

	mob_max_size = vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
	cursor_max_dim = vmw_read(dev_priv, SVGA_REG_CURSOR_MAX_DIMENSION);

	if (size > mob_max_size || vps->base.crtc_w > cursor_max_dim ||
	    vps->base.crtc_h > cursor_max_dim)
		return -EINVAL;

	if (vps->cursor.bo) {
		if (vps->cursor.bo->tbo.base.size >= size)
			return 0;
		vmw_du_put_cursor_mob(vcp, vps);
	}

	/* Look for an unused mob in the cache. */
	for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
		if (vcp->cursor_mobs[i] &&
		    vcp->cursor_mobs[i]->tbo.base.size >= size) {
			vps->cursor.bo = vcp->cursor_mobs[i];
			vcp->cursor_mobs[i] = NULL;
			return 0;
		}
	}

	/* Create a new mob if we can't find an existing one. */
	ret = vmw_bo_create_and_populate(dev_priv, size,
					 VMW_BO_DOMAIN_MOB,
					 &vps->cursor.bo);

	if (ret != 0)
		return ret;

	/* Fence the mob creation so we are guaranteed to have the mob */
	ret = ttm_bo_reserve(&vps->cursor.bo->tbo, false, false, NULL);
	if (ret != 0)
		goto teardown;

	vmw_bo_fence_single(&vps->cursor.bo->tbo, NULL);
	ttm_bo_unreserve(&vps->cursor.bo->tbo);
	return 0;

teardown:
	vmw_du_destroy_cursor_mob(&vps->cursor.bo);
	return ret;
}
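/**
 * vmw_cursor_update_position - Update the cursor position and visibility
 * @dev_priv: device to work with
 * @show: true to show the cursor, false to hide it
 * @x: cursor x position
 * @y: cursor y position
 *
 * Uses the CURSOR4 registers when the device exposes them, falls back to
 * cursor bypass 3 FIFO updates and finally to the legacy cursor registers.
 */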
static void vmw_cursor_update_position(struct vmw_private *dev_priv,
				       bool show, int x, int y)
{
	const uint32_t svga_cursor_on = show ? SVGA_CURSOR_ON_SHOW
					     : SVGA_CURSOR_ON_HIDE;
	uint32_t count;

	spin_lock(&dev_priv->cursor_lock);
	if (dev_priv->capabilities2 & SVGA_CAP2_EXTRA_REGS) {
		vmw_write(dev_priv, SVGA_REG_CURSOR4_X, x);
		vmw_write(dev_priv, SVGA_REG_CURSOR4_Y, y);
		vmw_write(dev_priv, SVGA_REG_CURSOR4_SCREEN_ID, SVGA3D_INVALID_ID);
		vmw_write(dev_priv, SVGA_REG_CURSOR4_ON, svga_cursor_on);
		vmw_write(dev_priv, SVGA_REG_CURSOR4_SUBMIT, 1);
	} else if (vmw_is_cursor_bypass3_enabled(dev_priv)) {
		vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_ON, svga_cursor_on);
		vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_X, x);
		vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_Y, y);
		count = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_CURSOR_COUNT);
		vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_COUNT, ++count);
	} else {
		vmw_write(dev_priv, SVGA_REG_CURSOR_X, x);
		vmw_write(dev_priv, SVGA_REG_CURSOR_Y, y);
		vmw_write(dev_priv, SVGA_REG_CURSOR_ON, svga_cursor_on);
	}
	spin_unlock(&dev_priv->cursor_lock);
}
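/**
 * vmw_kms_cursor_snoop - Snoop a cursor image out of a surface DMA command
 * @srf: surface that carries the snooper
 * @tfile: ttm object file of the caller (currently unused)
 * @bo: buffer object backing the DMA
 * @header: header of the surface DMA command to snoop
 *
 * Verifies that the DMA describes a single, page-aligned, full copy box
 * that fits the snoop image, then copies the cursor pixels into
 * @srf->snooper.image, row by row when the pitches differ.
 */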
void vmw_kms_cursor_snoop(struct vmw_surface *srf,
			  struct ttm_object_file *tfile,
			  struct ttm_buffer_object *bo,
			  SVGA3dCmdHeader *header)
{
	struct ttm_bo_kmap_obj map;
	unsigned long kmap_offset;
	unsigned long kmap_num;
	SVGA3dCopyBox *box;
	unsigned int box_count;
	void *virtual;
	bool is_iomem;
	struct vmw_dma_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceDMA dma;
	} *cmd;
	int i, ret;
	const struct SVGA3dSurfaceDesc *desc =
		vmw_surface_get_desc(VMW_CURSOR_SNOOP_FORMAT);
	const u32 image_pitch = VMW_CURSOR_SNOOP_WIDTH * desc->pitchBytesPerBlock;

	cmd = container_of(header, struct vmw_dma_cmd, header);

	/* No snooper installed, nothing to copy */
	if (!srf->snooper.image)
		return;

	if (cmd->dma.host.face != 0 || cmd->dma.host.mipmap != 0) {
		DRM_ERROR("face and mipmap for cursors should never != 0\n");
		return;
	}

	if (cmd->header.size < 64) {
		DRM_ERROR("at least one full copy box must be given\n");
		return;
	}

	box = (SVGA3dCopyBox *)&cmd[1];
	box_count = (cmd->header.size - sizeof(SVGA3dCmdSurfaceDMA)) /
			sizeof(SVGA3dCopyBox);

	if (cmd->dma.guest.ptr.offset % PAGE_SIZE ||
	    box->x != 0    || box->y != 0    || box->z != 0    ||
	    box->srcx != 0 || box->srcy != 0 || box->srcz != 0 ||
	    box->d != 1    || box_count != 1 ||
	    box->w > VMW_CURSOR_SNOOP_WIDTH || box->h > VMW_CURSOR_SNOOP_HEIGHT) {
		/* TODO handle non-page-aligned offsets */
		/* TODO handle more dst & src != 0 */
		/* TODO handle more than one copy */
		DRM_ERROR("Can't snoop dma request for cursor!\n");
		DRM_ERROR("(%u, %u, %u) (%u, %u, %u) (%ux%ux%u) %u %u\n",
			  box->srcx, box->srcy, box->srcz,
			  box->x, box->y, box->z,
			  box->w, box->h, box->d, box_count,
			  cmd->dma.guest.ptr.offset);
		return;
	}

	kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT;
	kmap_num = (VMW_CURSOR_SNOOP_HEIGHT * image_pitch) >> PAGE_SHIFT;

	ret = ttm_bo_reserve(bo, true, false, NULL);
	if (unlikely(ret != 0)) {
		DRM_ERROR("reserve failed\n");
		return;
	}

	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
	if (unlikely(ret != 0))
		goto err_unreserve;

	virtual = ttm_kmap_obj_virtual(&map, &is_iomem);

	if (box->w == VMW_CURSOR_SNOOP_WIDTH && cmd->dma.guest.pitch == image_pitch) {
		memcpy(srf->snooper.image, virtual,
		       VMW_CURSOR_SNOOP_HEIGHT * image_pitch);
	} else {
		/* Image is unsigned pointer. */
		for (i = 0; i < box->h; i++)
			memcpy(srf->snooper.image + i * image_pitch,
			       virtual + i * cmd->dma.guest.pitch,
			       box->w * desc->pitchBytesPerBlock);
	}

	srf->snooper.age++;

	ttm_bo_kunmap(&map);
err_unreserve:
	ttm_bo_unreserve(bo);
}
/**
 * vmw_kms_legacy_hotspot_clear - Clear legacy hotspots
 *
 * @dev_priv: Pointer to the device private struct.
 *
 * Clears all legacy hotspots.
 */
void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct vmw_display_unit *du;
	struct drm_crtc *crtc;

	drm_modeset_lock_all(dev);
	drm_for_each_crtc(crtc, dev) {
		du = vmw_crtc_to_du(crtc);

		du->hotspot_x = 0;
		du->hotspot_y = 0;
	}
	drm_modeset_unlock_all(dev);
}
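/**
 * vmw_kms_cursor_post_execbuf - Re-define snooped cursors after command submission
 * @dev_priv: device private
 *
 * Walks all crtcs and re-sends the cursor definition from the snooped
 * image wherever the snooper age shows that a submitted command stream
 * has updated it.
 */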
void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct vmw_display_unit *du;
	struct drm_crtc *crtc;

	mutex_lock(&dev->mode_config.mutex);

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		du = vmw_crtc_to_du(crtc);
		if (!du->cursor_surface ||
		    du->cursor_age == du->cursor_surface->snooper.age ||
		    !du->cursor_surface->snooper.image)
			continue;

		du->cursor_age = du->cursor_surface->snooper.age;
		vmw_send_define_cursor_cmd(dev_priv,
					   du->cursor_surface->snooper.image,
					   VMW_CURSOR_SNOOP_WIDTH,
					   VMW_CURSOR_SNOOP_HEIGHT,
					   du->hotspot_x + du->core_hotspot_x,
					   du->hotspot_y + du->core_hotspot_y);
	}

	mutex_unlock(&dev->mode_config.mutex);
}
void vmw_du_cursor_plane_destroy(struct drm_plane *plane)
{
	struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
	u32 i;

	vmw_cursor_update_position(plane->dev->dev_private, false, 0, 0);

	for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++)
		vmw_du_destroy_cursor_mob(&vcp->cursor_mobs[i]);

	drm_plane_cleanup(plane);
}
void vmw_du_primary_plane_destroy(struct drm_plane *plane)
{
	drm_plane_cleanup(plane);

	/* Planes are static in our case so we don't free it */
}
/**
 * vmw_du_plane_unpin_surf - unpins resource associated with a framebuffer surface
 *
 * @vps: plane state associated with the display surface
 * @unreference: true if we also want to unreference the display.
 */
void vmw_du_plane_unpin_surf(struct vmw_plane_state *vps,
			     bool unreference)
{
	if (vps->surf) {
		if (vps->pinned) {
			vmw_resource_unpin(&vps->surf->res);
			vps->pinned--;
		}

		if (unreference) {
			if (vps->pinned)
				DRM_ERROR("Surface still pinned\n");
			vmw_surface_unreference(&vps->surf);
		}
	}
}
/**
 * vmw_du_plane_cleanup_fb - Unpins the plane surface
 *
 * @plane: display plane
 * @old_state: Contains the FB to clean up
 *
 * Unpins the framebuffer surface.
 */
void
vmw_du_plane_cleanup_fb(struct drm_plane *plane,
			struct drm_plane_state *old_state)
{
	struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);

	vmw_du_plane_unpin_surf(vps, false);
}
/**
 * vmw_du_cursor_plane_map_cm - Maps the cursor mobs.
 *
 * @vps: plane_state
 *
 * Returns 0 on success
 */
static int
vmw_du_cursor_plane_map_cm(struct vmw_plane_state *vps)
{
	int ret;
	u32 size = vmw_du_cursor_mob_size(vps->base.crtc_w, vps->base.crtc_h);
	struct ttm_buffer_object *bo;

	if (!vps->cursor.bo)
		return -EINVAL;

	bo = &vps->cursor.bo->tbo;

	if (bo->base.size < size)
		return -EINVAL;

	if (vps->cursor.bo->map.virtual)
		return 0;

	ret = ttm_bo_reserve(bo, false, false, NULL);
	if (unlikely(ret != 0))
		return -ENOMEM;

	vmw_bo_map_and_cache(vps->cursor.bo);

	ttm_bo_unreserve(bo);

	if (unlikely(ret != 0))
		return -ENOMEM;

	return 0;
}
/**
 * vmw_du_cursor_plane_unmap_cm - Unmaps the cursor mobs.
 *
 * @vps: state of the cursor plane
 *
 * Returns 0 on success
 */
static int
vmw_du_cursor_plane_unmap_cm(struct vmw_plane_state *vps)
{
	int ret = 0;
	struct vmw_bo *vbo = vps->cursor.bo;

	if (!vbo || !vbo->map.virtual)
		return 0;

	ret = ttm_bo_reserve(&vbo->tbo, true, false, NULL);
	if (likely(ret == 0)) {
		vmw_bo_unmap(vbo);
		ttm_bo_unreserve(&vbo->tbo);
	}

	return ret;
}
/**
 * vmw_du_cursor_plane_cleanup_fb - Unpins the plane surface
 *
 * @plane: cursor plane
 * @old_state: contains the state to clean up
 *
 * Unmaps all cursor bo mappings and unpins the cursor surface.
 */
void
vmw_du_cursor_plane_cleanup_fb(struct drm_plane *plane,
			       struct drm_plane_state *old_state)
{
	struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
	struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);
	bool is_iomem;

	if (vps->surf_mapped) {
		vmw_bo_unmap(vps->surf->res.guest_memory_bo);
		vps->surf_mapped = false;
	}

	if (vps->bo && ttm_kmap_obj_virtual(&vps->bo->map, &is_iomem)) {
		const int ret = ttm_bo_reserve(&vps->bo->tbo, true, false, NULL);

		if (likely(ret == 0)) {
			ttm_bo_kunmap(&vps->bo->map);
			ttm_bo_unreserve(&vps->bo->tbo);
		}
	}

	vmw_du_cursor_plane_unmap_cm(vps);
	vmw_du_put_cursor_mob(vcp, vps);

	vmw_du_plane_unpin_surf(vps, false);

	if (vps->surf) {
		vmw_surface_unreference(&vps->surf);
		vps->surf = NULL;
	}

	if (vps->bo) {
		vmw_bo_unreference(&vps->bo);
		vps->bo = NULL;
	}
}
/**
 * vmw_du_cursor_plane_prepare_fb - Readies the cursor by referencing it
 *
 * @plane: display plane
 * @new_state: info on the new plane state, including the FB
 *
 * Returns 0 on success
 */
int
vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane,
			       struct drm_plane_state *new_state)
{
	struct drm_framebuffer *fb = new_state->fb;
	struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
	struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
	int ret = 0;

	if (vps->surf) {
		vmw_surface_unreference(&vps->surf);
		vps->surf = NULL;
	}

	if (vps->bo) {
		vmw_bo_unreference(&vps->bo);
		vps->bo = NULL;
	}

	if (fb) {
		if (vmw_framebuffer_to_vfb(fb)->bo) {
			vps->bo = vmw_framebuffer_to_vfbd(fb)->buffer;
			vmw_bo_reference(vps->bo);
		} else {
			vps->surf = vmw_framebuffer_to_vfbs(fb)->surface;
			vmw_surface_reference(vps->surf);
		}
	}

	if (!vps->surf && vps->bo) {
		const u32 size = new_state->crtc_w * new_state->crtc_h * sizeof(u32);

		/*
		 * Not using vmw_bo_map_and_cache() helper here as we need to
		 * reserve the ttm_buffer_object first which
		 * vmw_bo_map_and_cache() omits.
		 */
		ret = ttm_bo_reserve(&vps->bo->tbo, true, false, NULL);

		if (unlikely(ret != 0))
			return -ENOMEM;

		ret = ttm_bo_kmap(&vps->bo->tbo, 0, PFN_UP(size), &vps->bo->map);

		ttm_bo_unreserve(&vps->bo->tbo);

		if (unlikely(ret != 0))
			return -ENOMEM;
	} else if (vps->surf && !vps->bo && vps->surf->res.guest_memory_bo) {

		WARN_ON(vps->surf->snooper.image);
		ret = ttm_bo_reserve(&vps->surf->res.guest_memory_bo->tbo, true, false,
				     NULL);
		if (unlikely(ret != 0))
			return -ENOMEM;
		vmw_bo_map_and_cache(vps->surf->res.guest_memory_bo);
		ttm_bo_unreserve(&vps->surf->res.guest_memory_bo->tbo);
		vps->surf_mapped = true;
	}

	if (vps->surf || vps->bo) {
		vmw_du_get_cursor_mob(vcp, vps);
		vmw_du_cursor_plane_map_cm(vps);
	}

	return 0;
}
void
vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
				  struct drm_atomic_state *state)
{
	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
									   plane);
	struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
									   plane);
	struct drm_crtc *crtc = new_state->crtc ?: old_state->crtc;
	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
	struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
	struct vmw_plane_state *old_vps = vmw_plane_state_to_vps(old_state);
	s32 hotspot_x, hotspot_y;

	hotspot_x = du->hotspot_x + new_state->hotspot_x;
	hotspot_y = du->hotspot_y + new_state->hotspot_y;

	du->cursor_surface = vps->surf;
	du->cursor_bo = vps->bo;

	if (!vps->surf && !vps->bo) {
		vmw_cursor_update_position(dev_priv, false, 0, 0);
		return;
	}

	vps->cursor.hotspot_x = hotspot_x;
	vps->cursor.hotspot_y = hotspot_y;

	if (vps->surf)
		du->cursor_age = du->cursor_surface->snooper.age;

	if (!vmw_du_cursor_plane_has_changed(old_vps, vps)) {
		/*
		 * If it hasn't changed, avoid making the device do extra
		 * work by keeping the old cursor active.
		 */
		struct vmw_cursor_plane_state tmp = old_vps->cursor;

		old_vps->cursor = vps->cursor;
		vps->cursor = tmp;
	} else {
		void *image = vmw_du_cursor_plane_acquire_image(vps);

		if (image)
			vmw_cursor_update_image(dev_priv, vps, image,
						new_state->crtc_w,
						new_state->crtc_h,
						hotspot_x, hotspot_y);
	}

	du->cursor_x = new_state->crtc_x + du->set_gui_x;
	du->cursor_y = new_state->crtc_y + du->set_gui_y;

	vmw_cursor_update_position(dev_priv, true,
				   du->cursor_x + hotspot_x,
				   du->cursor_y + hotspot_y);

	du->core_hotspot_x = hotspot_x - du->hotspot_x;
	du->core_hotspot_y = hotspot_y - du->hotspot_y;
}
/**
 * vmw_du_primary_plane_atomic_check - check if the new state is okay
 *
 * @plane: display plane
 * @state: info on the new plane state, including the FB
 *
 * Check if the new state is settable given the current state. Other
 * than what the atomic helper checks, we care about crtc fitting
 * the FB and maintaining one active framebuffer.
 *
 * Returns 0 on success
 */
int vmw_du_primary_plane_atomic_check(struct drm_plane *plane,
				      struct drm_atomic_state *state)
{
	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
									   plane);
	struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
									   plane);
	struct drm_crtc_state *crtc_state = NULL;
	struct drm_framebuffer *new_fb = new_state->fb;
	struct drm_framebuffer *old_fb = old_state->fb;
	int ret;

	/*
	 * Ignore damage clips if the framebuffer attached to the plane's state
	 * has changed since the last plane update (page-flip). In this case, a
	 * full plane update should happen because uploads are done per-buffer.
	 */
	if (old_fb != new_fb)
		new_state->ignore_damage_clips = true;

	if (new_state->crtc)
		crtc_state = drm_atomic_get_new_crtc_state(state,
							   new_state->crtc);

	ret = drm_atomic_helper_check_plane_state(new_state, crtc_state,
						  DRM_PLANE_NO_SCALING,
						  DRM_PLANE_NO_SCALING,
						  false, true);

	if (!ret && new_fb) {
		struct drm_crtc *crtc = new_state->crtc;
		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);

		vmw_connector_state_to_vcs(du->connector.state);
	}

	return ret;
}
/**
 * vmw_du_cursor_plane_atomic_check - check if the new state is okay
 *
 * @plane: cursor plane
 * @state: info on the new plane state
 *
 * This is a chance to fail if the new cursor state does not fit
 * our requirements.
 *
 * Returns 0 on success
 */
int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane,
				     struct drm_atomic_state *state)
{
	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
									   plane);
	int ret = 0;
	struct drm_crtc_state *crtc_state = NULL;
	struct vmw_surface *surface = NULL;
	struct drm_framebuffer *fb = new_state->fb;

	if (new_state->crtc)
		crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
							   new_state->crtc);

	ret = drm_atomic_helper_check_plane_state(new_state, crtc_state,
						  DRM_PLANE_NO_SCALING,
						  DRM_PLANE_NO_SCALING,
						  true, true);
	if (ret)
		return ret;

	/* Turning off */
	if (!fb)
		return 0;

	/* A lot of the code assumes this */
	if (new_state->crtc_w != 64 || new_state->crtc_h != 64) {
		DRM_ERROR("Invalid cursor dimensions (%d, %d)\n",
			  new_state->crtc_w, new_state->crtc_h);
		return -EINVAL;
	}

	if (!vmw_framebuffer_to_vfb(fb)->bo) {
		surface = vmw_framebuffer_to_vfbs(fb)->surface;

		WARN_ON(!surface);

		if (!surface ||
		    (!surface->snooper.image && !surface->res.guest_memory_bo)) {
			DRM_ERROR("surface not suitable for cursor\n");
			return -EINVAL;
		}
	}

	return 0;
}
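/**
 * vmw_du_crtc_atomic_check - check if the new crtc state is okay
 *
 * @crtc: DRM crtc
 * @state: atomic state containing the new crtc state
 *
 * Requires an active primary plane exactly when the crtc is enabled,
 * validates the connector mask, and fakes a dot clock since the virtual
 * device has none.
 *
 * Returns 0 on success
 */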
int vmw_du_crtc_atomic_check(struct drm_crtc *crtc,
			     struct drm_atomic_state *state)
{
	struct drm_crtc_state *new_state = drm_atomic_get_new_crtc_state(state,
									 crtc);
	struct vmw_display_unit *du = vmw_crtc_to_du(new_state->crtc);
	int connector_mask = drm_connector_mask(&du->connector);
	bool has_primary = new_state->plane_mask &
			   drm_plane_mask(crtc->primary);

	/* We always want to have an active plane with an active CRTC */
	if (has_primary != new_state->enable)
		return -EINVAL;

	if (new_state->connector_mask != connector_mask &&
	    new_state->connector_mask != 0) {
		DRM_ERROR("Invalid connectors configuration\n");
		return -EINVAL;
	}

	/*
	 * Our virtual device does not have a dot clock, so use the logical
	 * clock value as the dot clock.
	 */
	if (new_state->mode.crtc_clock == 0)
		new_state->adjusted_mode.crtc_clock = new_state->mode.clock;

	return 0;
}
void vmw_du_crtc_atomic_begin(struct drm_crtc *crtc,
			      struct drm_atomic_state *state)
{
}

void vmw_du_crtc_atomic_flush(struct drm_crtc *crtc,
			      struct drm_atomic_state *state)
{
}
/**
 * vmw_du_crtc_duplicate_state - duplicate crtc state
 * @crtc: DRM crtc
 *
 * Allocates and returns a copy of the crtc state (both common and
 * vmw-specific) for the specified crtc.
 *
 * Returns: The newly allocated crtc state, or NULL on failure.
 */
struct drm_crtc_state *
vmw_du_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct drm_crtc_state *state;
	struct vmw_crtc_state *vcs;

	if (WARN_ON(!crtc->state))
		return NULL;

	vcs = kmemdup(crtc->state, sizeof(*vcs), GFP_KERNEL);

	if (!vcs)
		return NULL;

	state = &vcs->base;

	__drm_atomic_helper_crtc_duplicate_state(crtc, state);

	return state;
}
/**
 * vmw_du_crtc_reset - creates a blank vmw crtc state
 * @crtc: DRM crtc
 *
 * Resets the atomic state for @crtc by freeing the state pointer (which
 * might be NULL, e.g. at driver load time) and allocating a new empty state
 * object.
 */
void vmw_du_crtc_reset(struct drm_crtc *crtc)
{
	struct vmw_crtc_state *vcs;

	if (crtc->state) {
		__drm_atomic_helper_crtc_destroy_state(crtc->state);

		kfree(vmw_crtc_state_to_vcs(crtc->state));
	}

	vcs = kzalloc(sizeof(*vcs), GFP_KERNEL);

	if (!vcs) {
		DRM_ERROR("Cannot allocate vmw_crtc_state\n");
		return;
	}

	__drm_atomic_helper_crtc_reset(crtc, &vcs->base);
}
/**
 * vmw_du_crtc_destroy_state - destroy crtc state
 * @crtc: DRM crtc
 * @state: state object to destroy
 *
 * Destroys the crtc state (both common and vmw-specific) for the
 * specified crtc.
 */
void
vmw_du_crtc_destroy_state(struct drm_crtc *crtc,
			  struct drm_crtc_state *state)
{
	drm_atomic_helper_crtc_destroy_state(crtc, state);
}
/**
 * vmw_du_plane_duplicate_state - duplicate plane state
 * @plane: DRM plane
 *
 * Allocates and returns a copy of the plane state (both common and
 * vmw-specific) for the specified plane.
 *
 * Returns: The newly allocated plane state, or NULL on failure.
 */
struct drm_plane_state *
vmw_du_plane_duplicate_state(struct drm_plane *plane)
{
	struct drm_plane_state *state;
	struct vmw_plane_state *vps;

	vps = kmemdup(plane->state, sizeof(*vps), GFP_KERNEL);

	if (!vps)
		return NULL;

	vps->pinned = 0;
	vps->cpp = 0;

	memset(&vps->cursor, 0, sizeof(vps->cursor));

	/* Each ref counted resource needs to be acquired again */
	if (vps->surf)
		(void) vmw_surface_reference(vps->surf);

	if (vps->bo)
		(void) vmw_bo_reference(vps->bo);

	state = &vps->base;

	__drm_atomic_helper_plane_duplicate_state(plane, state);

	return state;
}
/**
 * vmw_du_plane_reset - creates a blank vmw plane state
 * @plane: DRM plane
 *
 * Resets the atomic state for @plane by freeing the state pointer (which might
 * be NULL, e.g. at driver load time) and allocating a new empty state object.
 */
void vmw_du_plane_reset(struct drm_plane *plane)
{
	struct vmw_plane_state *vps;

	if (plane->state)
		vmw_du_plane_destroy_state(plane, plane->state);

	vps = kzalloc(sizeof(*vps), GFP_KERNEL);

	if (!vps) {
		DRM_ERROR("Cannot allocate vmw_plane_state\n");
		return;
	}

	__drm_atomic_helper_plane_reset(plane, &vps->base);
}
/**
 * vmw_du_plane_destroy_state - destroy plane state
 * @plane: DRM plane
 * @state: state object to destroy
 *
 * Destroys the plane state (both common and vmw-specific) for the
 * specified plane.
 */
void
vmw_du_plane_destroy_state(struct drm_plane *plane,
			   struct drm_plane_state *state)
{
	struct vmw_plane_state *vps = vmw_plane_state_to_vps(state);

	/* Should have been freed by cleanup_fb */
	if (vps->surf)
		vmw_surface_unreference(&vps->surf);

	if (vps->bo)
		vmw_bo_unreference(&vps->bo);

	drm_atomic_helper_plane_destroy_state(plane, state);
}
/**
 * vmw_du_connector_duplicate_state - duplicate connector state
 * @connector: DRM connector
 *
 * Allocates and returns a copy of the connector state (both common and
 * vmw-specific) for the specified connector.
 *
 * Returns: The newly allocated connector state, or NULL on failure.
 */
struct drm_connector_state *
vmw_du_connector_duplicate_state(struct drm_connector *connector)
{
	struct drm_connector_state *state;
	struct vmw_connector_state *vcs;

	if (WARN_ON(!connector->state))
		return NULL;

	vcs = kmemdup(connector->state, sizeof(*vcs), GFP_KERNEL);

	if (!vcs)
		return NULL;

	state = &vcs->base;

	__drm_atomic_helper_connector_duplicate_state(connector, state);

	return state;
}
/**
 * vmw_du_connector_reset - creates a blank vmw connector state
 * @connector: DRM connector
 *
 * Resets the atomic state for @connector by freeing the state pointer (which
 * might be NULL, e.g. at driver load time) and allocating a new empty state
 * object.
 */
void vmw_du_connector_reset(struct drm_connector *connector)
{
	struct vmw_connector_state *vcs;

	if (connector->state) {
		__drm_atomic_helper_connector_destroy_state(connector->state);

		kfree(vmw_connector_state_to_vcs(connector->state));
	}

	vcs = kzalloc(sizeof(*vcs), GFP_KERNEL);

	if (!vcs) {
		DRM_ERROR("Cannot allocate vmw_connector_state\n");
		return;
	}

	__drm_atomic_helper_connector_reset(connector, &vcs->base);
}
/**
 * vmw_du_connector_destroy_state - destroy connector state
 * @connector: DRM connector
 * @state: state object to destroy
 *
 * Destroys the connector state (both common and vmw-specific) for the
 * specified connector.
 */
void
vmw_du_connector_destroy_state(struct drm_connector *connector,
			       struct drm_connector_state *state)
{
	drm_atomic_helper_connector_destroy_state(connector, state);
}
/*
 * Generic framebuffer code
 */

/*
 * Surface framebuffer code
 */
static void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
{
	struct vmw_framebuffer_surface *vfbs =
		vmw_framebuffer_to_vfbs(framebuffer);

	drm_framebuffer_cleanup(framebuffer);
	vmw_surface_unreference(&vfbs->surface);

	kfree(vfbs);
}
/**
 * vmw_kms_readback - Perform a readback from the screen system to
 * a buffer-object backed framebuffer.
 *
 * @dev_priv: Pointer to the device private structure.
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * Must be set to NULL if @user_fence_rep is NULL.
 * @vfb: Pointer to the buffer-object backed framebuffer.
 * @user_fence_rep: User-space provided structure for fence information.
 * Must be set to non-NULL if @file_priv is non-NULL.
 * @vclips: Array of clip rects.
 * @num_clips: Number of clip rects in @vclips.
 *
 * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
 * interrupted.
 */
int vmw_kms_readback(struct vmw_private *dev_priv,
		     struct drm_file *file_priv,
		     struct vmw_framebuffer *vfb,
		     struct drm_vmw_fence_rep __user *user_fence_rep,
		     struct drm_vmw_rect *vclips,
		     uint32_t num_clips)
{
	switch (dev_priv->active_display_unit) {
	case vmw_du_screen_object:
		return vmw_kms_sou_readback(dev_priv, file_priv, vfb,
					    user_fence_rep, vclips, num_clips,
					    NULL);
	case vmw_du_screen_target:
		return vmw_kms_stdu_readback(dev_priv, file_priv, vfb,
					     user_fence_rep, NULL, vclips, num_clips,
					     1, NULL);
	default:
		WARN_ONCE(true,
			  "Readback called with invalid display system.\n");
	}

	return -ENOSYS;
}
static const struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
	.destroy = vmw_framebuffer_surface_destroy,
	.dirty = drm_atomic_helper_dirtyfb,
};
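/**
 * vmw_kms_new_framebuffer_surface - Build a framebuffer around a surface
 *
 * @dev_priv: Pointer to device private struct.
 * @surface: surface to wrap.
 * @out: location for the new vmw_framebuffer.
 * @mode_cmd: Frame-buffer metadata.
 * @is_bo_proxy: true if @surface is a proxy created by vmw_create_bo_proxy().
 *
 * Sanity-checks the format and dimensions of @surface against @mode_cmd
 * before initializing the DRM framebuffer.
 */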
static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
					   struct vmw_surface *surface,
					   struct vmw_framebuffer **out,
					   const struct drm_mode_fb_cmd2
					   *mode_cmd,
					   bool is_bo_proxy)
{
	struct drm_device *dev = &dev_priv->drm;
	struct vmw_framebuffer_surface *vfbs;
	enum SVGA3dSurfaceFormat format;
	int ret;

	/* 3D is only supported on HWv8 and newer hosts */
	if (dev_priv->active_display_unit == vmw_du_legacy)
		return -ENOSYS;

	/*
	 * Sanity checks.
	 */

	if (!drm_any_plane_has_format(&dev_priv->drm,
				      mode_cmd->pixel_format,
				      mode_cmd->modifier[0])) {
		drm_dbg(&dev_priv->drm,
			"unsupported pixel format %p4cc / modifier 0x%llx\n",
			&mode_cmd->pixel_format, mode_cmd->modifier[0]);
		return -EINVAL;
	}

	/* Surface must be marked as a scanout. */
	if (unlikely(!surface->metadata.scanout))
		return -EINVAL;

	if (unlikely(surface->metadata.mip_levels[0] != 1 ||
		     surface->metadata.num_sizes != 1 ||
		     surface->metadata.base_size.width < mode_cmd->width ||
		     surface->metadata.base_size.height < mode_cmd->height ||
		     surface->metadata.base_size.depth != 1)) {
		DRM_ERROR("Incompatible surface dimensions "
			  "for requested mode.\n");
		return -EINVAL;
	}

	switch (mode_cmd->pixel_format) {
	case DRM_FORMAT_ARGB8888:
		format = SVGA3D_A8R8G8B8;
		break;
	case DRM_FORMAT_XRGB8888:
		format = SVGA3D_X8R8G8B8;
		break;
	case DRM_FORMAT_RGB565:
		format = SVGA3D_R5G6B5;
		break;
	case DRM_FORMAT_XRGB1555:
		format = SVGA3D_A1R5G5B5;
		break;
	default:
		DRM_ERROR("Invalid pixel format: %p4cc\n",
			  &mode_cmd->pixel_format);
		return -EINVAL;
	}

	/*
	 * For DX, surface format validation is done when surface->scanout
	 * is set.
	 */
	if (!has_sm4_context(dev_priv) && format != surface->metadata.format) {
		DRM_ERROR("Invalid surface format for requested mode.\n");
		return -EINVAL;
	}

	vfbs = kzalloc(sizeof(*vfbs), GFP_KERNEL);
	if (!vfbs) {
		ret = -ENOMEM;
		goto out_err1;
	}

	drm_helper_mode_fill_fb_struct(dev, &vfbs->base.base, mode_cmd);
	vfbs->surface = vmw_surface_reference(surface);
	vfbs->base.user_handle = mode_cmd->handles[0];
	vfbs->is_bo_proxy = is_bo_proxy;

	*out = &vfbs->base;

	ret = drm_framebuffer_init(dev, &vfbs->base.base,
				   &vmw_framebuffer_surface_funcs);
	if (ret)
		goto out_err2;

	return 0;

out_err2:
	vmw_surface_unreference(&surface);
	kfree(vfbs);
out_err1:
	return ret;
}
/*
 * Buffer-object framebuffer code
 */

static int vmw_framebuffer_bo_create_handle(struct drm_framebuffer *fb,
					    struct drm_file *file_priv,
					    unsigned int *handle)
{
	struct vmw_framebuffer_bo *vfbd =
		vmw_framebuffer_to_vfbd(fb);

	return drm_gem_handle_create(file_priv, &vfbd->buffer->tbo.base, handle);
}
static void vmw_framebuffer_bo_destroy(struct drm_framebuffer *framebuffer)
{
	struct vmw_framebuffer_bo *vfbd =
		vmw_framebuffer_to_vfbd(framebuffer);

	drm_framebuffer_cleanup(framebuffer);
	vmw_bo_unreference(&vfbd->buffer);

	kfree(vfbd);
}
static const struct drm_framebuffer_funcs vmw_framebuffer_bo_funcs = {
	.create_handle = vmw_framebuffer_bo_create_handle,
	.destroy = vmw_framebuffer_bo_destroy,
	.dirty = drm_atomic_helper_dirtyfb,
};
/**
 * vmw_create_bo_proxy - create a proxy surface for the buffer object
 *
 * @dev: DRM device
 * @mode_cmd: parameters for the new surface
 * @bo_mob: MOB backing the buffer object
 * @srf_out: newly created surface
 *
 * When the content FB is a buffer object, we create a surface as a proxy to the
 * same buffer. This way we can do a surface copy rather than a surface DMA.
 * This is a more efficient approach.
 *
 * RETURNS:
 * 0 on success, error code otherwise
 */
static int vmw_create_bo_proxy(struct drm_device *dev,
			       const struct drm_mode_fb_cmd2 *mode_cmd,
			       struct vmw_bo *bo_mob,
			       struct vmw_surface **srf_out)
{
	struct vmw_surface_metadata metadata = {0};
	uint32_t format;
	struct vmw_resource *res;
	unsigned int bytes_pp;
	int ret;

	switch (mode_cmd->pixel_format) {
	case DRM_FORMAT_ARGB8888:
	case DRM_FORMAT_XRGB8888:
		format = SVGA3D_X8R8G8B8;
		bytes_pp = 4;
		break;

	case DRM_FORMAT_RGB565:
	case DRM_FORMAT_XRGB1555:
		format = SVGA3D_R5G6B5;
		bytes_pp = 2;
		break;

	case 8:
		format = SVGA3D_P8;
		bytes_pp = 1;
		break;

	default:
		DRM_ERROR("Invalid framebuffer format %p4cc\n",
			  &mode_cmd->pixel_format);
		return -EINVAL;
	}

	metadata.format = format;
	metadata.mip_levels[0] = 1;
	metadata.num_sizes = 1;
	metadata.base_size.width = mode_cmd->pitches[0] / bytes_pp;
	metadata.base_size.height = mode_cmd->height;
	metadata.base_size.depth = 1;
	metadata.scanout = true;

	ret = vmw_gb_surface_define(vmw_priv(dev), &metadata, srf_out);
	if (ret) {
		DRM_ERROR("Failed to allocate proxy content buffer\n");
		return ret;
	}

	res = &(*srf_out)->res;

	/* Reserve and switch the backing mob. */
	mutex_lock(&res->dev_priv->cmdbuf_mutex);
	(void) vmw_resource_reserve(res, false, true);
	vmw_user_bo_unref(&res->guest_memory_bo);
	res->guest_memory_bo = vmw_user_bo_ref(bo_mob);
	res->guest_memory_offset = 0;
	vmw_resource_unreserve(res, false, false, false, NULL, 0);
	mutex_unlock(&res->dev_priv->cmdbuf_mutex);

	return 0;
}
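/**
 * vmw_kms_new_framebuffer_bo - Build a framebuffer around a buffer object
 *
 * @dev_priv: Pointer to device private struct.
 * @bo: buffer object to wrap.
 * @out: location for the new vmw_framebuffer.
 * @mode_cmd: Frame-buffer metadata.
 *
 * Verifies that @bo is large enough for the requested mode and that its
 * pixel format is supported before initializing the DRM framebuffer.
 */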
static int vmw_kms_new_framebuffer_bo(struct vmw_private *dev_priv,
				      struct vmw_bo *bo,
				      struct vmw_framebuffer **out,
				      const struct drm_mode_fb_cmd2
				      *mode_cmd)
{
	struct drm_device *dev = &dev_priv->drm;
	struct vmw_framebuffer_bo *vfbd;
	unsigned int requested_size;
	int ret;

	requested_size = mode_cmd->height * mode_cmd->pitches[0];
	if (unlikely(requested_size > bo->tbo.base.size)) {
		DRM_ERROR("Screen buffer object size is too small "
			  "for requested mode.\n");
		return -EINVAL;
	}

	if (!drm_any_plane_has_format(&dev_priv->drm,
				      mode_cmd->pixel_format,
				      mode_cmd->modifier[0])) {
		drm_dbg(&dev_priv->drm,
			"unsupported pixel format %p4cc / modifier 0x%llx\n",
			&mode_cmd->pixel_format, mode_cmd->modifier[0]);
		return -EINVAL;
	}

	vfbd = kzalloc(sizeof(*vfbd), GFP_KERNEL);
	if (!vfbd) {
		ret = -ENOMEM;
		goto out_err1;
	}

	vfbd->base.base.obj[0] = &bo->tbo.base;
	drm_helper_mode_fill_fb_struct(dev, &vfbd->base.base, mode_cmd);
	vfbd->base.bo = true;
	vfbd->buffer = vmw_bo_reference(bo);
	vfbd->base.user_handle = mode_cmd->handles[0];
	*out = &vfbd->base;

	ret = drm_framebuffer_init(dev, &vfbd->base.base,
				   &vmw_framebuffer_bo_funcs);
	if (ret)
		goto out_err2;

	return 0;

out_err2:
	vmw_bo_unreference(&bo);
	kfree(vfbd);
out_err1:
	return ret;
}
/**
 * vmw_kms_srf_ok - check if a surface can be created
 *
 * @dev_priv: Pointer to device private struct.
 * @width: requested width
 * @height: requested height
 *
 * Surfaces need to be less than texture size
 */
static bool
vmw_kms_srf_ok(struct vmw_private *dev_priv, uint32_t width, uint32_t height)
{
	if (width > dev_priv->texture_max_width ||
	    height > dev_priv->texture_max_height)
		return false;

	return true;
}
/**
 * vmw_kms_new_framebuffer - Create a new framebuffer.
 *
 * @dev_priv: Pointer to device private struct.
 * @bo: Pointer to buffer object to wrap the kms framebuffer around.
 * Either @bo or @surface must be NULL.
 * @surface: Pointer to a surface to wrap the kms framebuffer around.
 * Either @bo or @surface must be NULL.
 * @only_2d: No presents will occur to this buffer object based framebuffer.
 * This helps the code to do some important optimizations.
 * @mode_cmd: Frame-buffer metadata.
 */
struct vmw_framebuffer *
vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
			struct vmw_bo *bo,
			struct vmw_surface *surface,
			bool only_2d,
			const struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct vmw_framebuffer *vfb = NULL;
	bool is_bo_proxy = false;
	int ret;

	/*
	 * We cannot use the SurfaceDMA command in a non-accelerated VM,
	 * therefore, wrap the buffer object in a surface so we can use the
	 * SurfaceCopy command.
	 */
	if (vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height) &&
	    bo && only_2d &&
	    mode_cmd->width > 64 &&  /* Don't create a proxy for cursor */
	    dev_priv->active_display_unit == vmw_du_screen_target) {
		ret = vmw_create_bo_proxy(&dev_priv->drm, mode_cmd,
					  bo, &surface);
		if (ret)
			return ERR_PTR(ret);

		is_bo_proxy = true;
	}

	/* Create the new framebuffer depending on what we have */
	if (surface) {
		ret = vmw_kms_new_framebuffer_surface(dev_priv, surface, &vfb,
						      mode_cmd,
						      is_bo_proxy);
		/*
		 * vmw_create_bo_proxy() adds a reference that is no longer
		 * needed
		 */
		if (is_bo_proxy)
			vmw_surface_unreference(&surface);
	} else if (bo) {
		ret = vmw_kms_new_framebuffer_bo(dev_priv, bo, &vfb,
						 mode_cmd);
	} else {
		BUG();
	}

	if (ret)
		return ERR_PTR(ret);

	return vfb;
}
/*
 * Generic Kernel modesetting functions
 */

static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
						 struct drm_file *file_priv,
						 const struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_framebuffer *vfb = NULL;
	struct vmw_surface *surface = NULL;
	struct vmw_bo *bo = NULL;
	int ret;

	/* returns either a bo or surface */
	ret = vmw_user_lookup_handle(dev_priv, file_priv,
				     mode_cmd->handles[0],
				     &surface, &bo);
	if (ret) {
		DRM_ERROR("Invalid buffer object handle %u (0x%x).\n",
			  mode_cmd->handles[0], mode_cmd->handles[0]);
		goto err_out;
	}

	if (!bo &&
	    !vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height)) {
		DRM_ERROR("Surface size cannot exceed %dx%d\n",
			  dev_priv->texture_max_width,
			  dev_priv->texture_max_height);
		goto err_out;
	}

	vfb = vmw_kms_new_framebuffer(dev_priv, bo, surface,
				      !(dev_priv->capabilities & SVGA_CAP_3D),
				      mode_cmd);
	if (IS_ERR(vfb)) {
		ret = PTR_ERR(vfb);
		goto err_out;
	}

err_out:
	/* vmw_user_lookup_handle takes one ref so does new_fb */
	if (bo)
		vmw_user_bo_unref(&bo);
	if (surface)
		vmw_surface_unreference(&surface);

	if (ret) {
		DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
		return ERR_PTR(ret);
	}

	return &vfb->base;
}
/**
 * vmw_kms_check_display_memory - Validates display memory required for a
 * topology
 * @dev: DRM device
 * @num_rects: number of drm_rect in rects
 * @rects: array of drm_rect representing the topology to validate indexed by
 * crtc index.
 *
 * Returns:
 * 0 on success otherwise negative error code
 */
static int vmw_kms_check_display_memory(struct drm_device *dev,
					uint32_t num_rects,
					struct drm_rect *rects)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_rect bounding_box = {0};
	u64 total_pixels = 0, pixel_mem, bb_mem;
	int i;

	for (i = 0; i < num_rects; i++) {
		/*
		 * For STDU only individual screen (screen target) is limited by
		 * SCREENTARGET_MAX_WIDTH/HEIGHT registers.
		 */
		if (dev_priv->active_display_unit == vmw_du_screen_target &&
		    (drm_rect_width(&rects[i]) > dev_priv->stdu_max_width ||
		     drm_rect_height(&rects[i]) > dev_priv->stdu_max_height)) {
			VMW_DEBUG_KMS("Screen size not supported.\n");
			return -EINVAL;
		}

		/* Bounding box upper left is at (0,0). */
		if (rects[i].x2 > bounding_box.x2)
			bounding_box.x2 = rects[i].x2;

		if (rects[i].y2 > bounding_box.y2)
			bounding_box.y2 = rects[i].y2;

		total_pixels += (u64) drm_rect_width(&rects[i]) *
			(u64) drm_rect_height(&rects[i]);
	}

	/* Virtual svga device primary limits are always in 32-bpp. */
	pixel_mem = total_pixels * 4;

	/*
	 * For HV10 and below prim_bb_mem is vram size. When
	 * SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM is not present vram size is
	 * limit on primary bounding box
	 */
	if (pixel_mem > dev_priv->max_primary_mem) {
		VMW_DEBUG_KMS("Combined output size too large.\n");
		return -EINVAL;
	}

	/* SVGA_CAP_NO_BB_RESTRICTION is available for STDU only. */
	if (dev_priv->active_display_unit != vmw_du_screen_target ||
	    !(dev_priv->capabilities & SVGA_CAP_NO_BB_RESTRICTION)) {
		bb_mem = (u64) bounding_box.x2 * bounding_box.y2 * 4;

		if (bb_mem > dev_priv->max_primary_mem) {
			VMW_DEBUG_KMS("Topology is beyond supported limits.\n");
			return -EINVAL;
		}
	}

	return 0;
}
/**
 * vmw_crtc_state_and_lock - Return new or current crtc state with locked
 * crtc mutex
 * @state: The atomic state pointer containing the new atomic state
 * @crtc: The crtc
 *
 * This function returns the new crtc state if it's part of the state update.
 * Otherwise returns the current crtc state. It also makes sure that the
 * crtc mutex is locked.
 *
 * Returns: A valid crtc state pointer or NULL. It may also return a
 * pointer error, in particular -EDEADLK if locking needs to be rerun.
 */
static struct drm_crtc_state *
vmw_crtc_state_and_lock(struct drm_atomic_state *state, struct drm_crtc *crtc)
{
	struct drm_crtc_state *crtc_state;

	crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
	if (crtc_state) {
		lockdep_assert_held(&crtc->mutex.mutex.base);
	} else {
		int ret = drm_modeset_lock(&crtc->mutex, state->acquire_ctx);

		if (ret != 0 && ret != -EALREADY)
			return ERR_PTR(ret);

		crtc_state = crtc->state;
	}

	return crtc_state;
}
/**
 * vmw_kms_check_implicit - Verify that all implicit display units scan out
 * from the same fb after the new state is committed.
 * @dev: The drm_device.
 * @state: The new state to be checked.
 *
 * Returns:
 * Zero on success,
 * -EINVAL on invalid state,
 * -EDEADLK if modeset locking needs to be rerun.
 */
static int vmw_kms_check_implicit(struct drm_device *dev,
				  struct drm_atomic_state *state)
{
	struct drm_framebuffer *implicit_fb = NULL;
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	struct drm_plane_state *plane_state;

	drm_for_each_crtc(crtc, dev) {
		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);

		if (!du->is_implicit)
			continue;

		crtc_state = vmw_crtc_state_and_lock(state, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		if (!crtc_state || !crtc_state->enable)
			continue;

		/*
		 * Can't move primary planes across crtcs, so this is OK.
		 * It also means we don't need to take the plane mutex.
		 */
		plane_state = du->primary.state;
		if (plane_state->crtc != crtc)
			continue;

		if (!implicit_fb)
			implicit_fb = plane_state->fb;
		else if (implicit_fb != plane_state->fb)
			return -EINVAL;
	}

	return 0;
}
/**
 * vmw_kms_check_topology - Validates topology in drm_atomic_state
 * @dev: DRM device
 * @state: the driver state object
 *
 * Returns:
 * 0 on success otherwise negative error code
 */
static int vmw_kms_check_topology(struct drm_device *dev,
				  struct drm_atomic_state *state)
{
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct drm_rect *rects;
	struct drm_crtc *crtc;
	uint32_t i;
	int ret = 0;

	rects = kcalloc(dev->mode_config.num_crtc, sizeof(struct drm_rect),
			GFP_KERNEL);
	if (!rects)
		return -ENOMEM;

	drm_for_each_crtc(crtc, dev) {
		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
		struct drm_crtc_state *crtc_state;

		i = drm_crtc_index(crtc);

		crtc_state = vmw_crtc_state_and_lock(state, crtc);
		if (IS_ERR(crtc_state)) {
			ret = PTR_ERR(crtc_state);
			goto clean;
		}

		if (!crtc_state)
			continue;

		if (crtc_state->enable) {
			rects[i].x1 = du->gui_x;
			rects[i].y1 = du->gui_y;
			rects[i].x2 = du->gui_x + crtc_state->mode.hdisplay;
			rects[i].y2 = du->gui_y + crtc_state->mode.vdisplay;
		} else {
			rects[i].x1 = 0;
			rects[i].y1 = 0;
			rects[i].x2 = 0;
			rects[i].y2 = 0;
		}
	}

	/* Determine change to topology due to new atomic state */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
				      new_crtc_state, i) {
		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
		struct drm_connector *connector;
		struct drm_connector_state *conn_state;
		struct vmw_connector_state *vmw_conn_state;

		if (!du->pref_active && new_crtc_state->enable) {
			VMW_DEBUG_KMS("Enabling a disabled display unit\n");
			ret = -EINVAL;
			goto clean;
		}

		/*
		 * For vmwgfx each crtc has only one connector attached and it
		 * is not changed so don't really need to check the
		 * crtc->connector_mask and iterate over it.
		 */
		connector = &du->connector;
		conn_state = drm_atomic_get_connector_state(state, connector);
		if (IS_ERR(conn_state)) {
			ret = PTR_ERR(conn_state);
			goto clean;
		}

		vmw_conn_state = vmw_connector_state_to_vcs(conn_state);
		vmw_conn_state->gui_x = du->gui_x;
		vmw_conn_state->gui_y = du->gui_y;
	}

	ret = vmw_kms_check_display_memory(dev, dev->mode_config.num_crtc,
					   rects);

clean:
	kfree(rects);
	return ret;
}
/**
 * vmw_kms_atomic_check_modeset - validate state object for modeset changes
 *
 * @dev: DRM device
 * @state: the driver state object
 *
 * This is a simple wrapper around drm_atomic_helper_check_modeset() for
 * us to assign a value to mode->crtc_clock so that
 * drm_calc_timestamping_constants() won't throw an error message
 *
 * Returns:
 * Zero for success or -errno
 */
static int
vmw_kms_atomic_check_modeset(struct drm_device *dev,
			     struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	bool need_modeset = false;
	int i, ret;

	ret = drm_atomic_helper_check(dev, state);
	if (ret)
		return ret;

	ret = vmw_kms_check_implicit(dev, state);
	if (ret) {
		VMW_DEBUG_KMS("Invalid implicit state\n");
		return ret;
	}

	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		if (drm_atomic_crtc_needs_modeset(crtc_state))
			need_modeset = true;
	}

	if (need_modeset)
		return vmw_kms_check_topology(dev, state);

	return ret;
}
static const struct drm_mode_config_funcs vmw_kms_funcs = {
	.fb_create = vmw_kms_fb_create,
	.atomic_check = vmw_kms_atomic_check_modeset,
	.atomic_commit = drm_atomic_helper_commit,
};
static int vmw_kms_generic_present(struct vmw_private *dev_priv,
				   struct drm_file *file_priv,
				   struct vmw_framebuffer *vfb,
				   struct vmw_surface *surface,
				   uint32_t sid,
				   int32_t destX, int32_t destY,
				   struct drm_vmw_rect *clips,
				   uint32_t num_clips)
{
	return vmw_kms_sou_do_surface_dirty(dev_priv, vfb, NULL, clips,
					    &surface->res, destX, destY,
					    num_clips, 1, NULL, NULL);
}
int vmw_kms_present(struct vmw_private *dev_priv,
		    struct drm_file *file_priv,
		    struct vmw_framebuffer *vfb,
		    struct vmw_surface *surface,
		    uint32_t sid,
		    int32_t destX, int32_t destY,
		    struct drm_vmw_rect *clips,
		    uint32_t num_clips)
{
	int ret;

	switch (dev_priv->active_display_unit) {
	case vmw_du_screen_target:
		ret = vmw_kms_stdu_surface_dirty(dev_priv, vfb, NULL, clips,
						 &surface->res, destX, destY,
						 num_clips, 1, NULL, NULL);
		break;
	case vmw_du_screen_object:
		ret = vmw_kms_generic_present(dev_priv, file_priv, vfb, surface,
					      sid, destX, destY, clips,
					      num_clips);
		break;
	default:
		WARN_ONCE(true,
			  "Present called with invalid display system.\n");
		ret = -ENOSYS;
		break;
	}
	if (ret)
		return ret;

	vmw_cmd_flush(dev_priv, false);

	return 0;
}
static void
vmw_kms_create_hotplug_mode_update_property(struct vmw_private *dev_priv)
{
	if (dev_priv->hotplug_mode_update_property)
		return;

	dev_priv->hotplug_mode_update_property =
		drm_property_create_range(&dev_priv->drm,
					  DRM_MODE_PROP_IMMUTABLE,
					  "hotplug_mode_update", 0, 1);
}
int vmw_kms_init(struct vmw_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	int ret;
	static const char *display_unit_names[] = {
		"Invalid",
		"Legacy",
		"Screen Object",
		"Screen Target",
		"Invalid (max)"
	};

	drm_mode_config_init(dev);
	dev->mode_config.funcs = &vmw_kms_funcs;
	dev->mode_config.min_width = 1;
	dev->mode_config.min_height = 1;
	dev->mode_config.max_width = dev_priv->texture_max_width;
	dev->mode_config.max_height = dev_priv->texture_max_height;
	dev->mode_config.preferred_depth = dev_priv->assume_16bpp ? 16 : 32;

	drm_mode_create_suggested_offset_properties(dev);
	vmw_kms_create_hotplug_mode_update_property(dev_priv);

	ret = vmw_kms_stdu_init_display(dev_priv);
	if (ret) {
		ret = vmw_kms_sou_init_display(dev_priv);
		if (ret) /* Fallback */
			ret = vmw_kms_ldu_init_display(dev_priv);
	}
	BUILD_BUG_ON(ARRAY_SIZE(display_unit_names) != (vmw_du_max + 1));
	drm_info(&dev_priv->drm, "%s display unit initialized\n",
		 display_unit_names[dev_priv->active_display_unit]);

	return ret;
}
int vmw_kms_close(struct vmw_private *dev_priv)
{
	int ret = 0;

	/*
	 * Docs say we should take the lock before calling this function
	 * but since it destroys encoders and our destructor calls
	 * drm_encoder_cleanup which takes the lock we deadlock.
	 */
	drm_mode_config_cleanup(&dev_priv->drm);
	if (dev_priv->active_display_unit == vmw_du_legacy)
		ret = vmw_kms_ldu_close_display(dev_priv);

	return ret;
}
int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct drm_vmw_cursor_bypass_arg *arg = data;
	struct vmw_display_unit *du;
	struct drm_crtc *crtc;
	int ret = 0;

	mutex_lock(&dev->mode_config.mutex);
	if (arg->flags & DRM_VMW_CURSOR_BYPASS_ALL) {

		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
			du = vmw_crtc_to_du(crtc);
			du->hotspot_x = arg->xhot;
			du->hotspot_y = arg->yhot;
		}

		mutex_unlock(&dev->mode_config.mutex);
		return 0;
	}

	crtc = drm_crtc_find(dev, file_priv, arg->crtc_id);
	if (!crtc) {
		ret = -ENOENT;
		goto out;
	}

	du = vmw_crtc_to_du(crtc);

	du->hotspot_x = arg->xhot;
	du->hotspot_y = arg->yhot;

out:
	mutex_unlock(&dev->mode_config.mutex);

	return ret;
}
int vmw_kms_write_svga(struct vmw_private *vmw_priv,
		       unsigned width, unsigned height, unsigned pitch,
		       unsigned bpp, unsigned depth)
{
	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
		vmw_write(vmw_priv, SVGA_REG_PITCHLOCK, pitch);
	else if (vmw_fifo_have_pitchlock(vmw_priv))
		vmw_fifo_mem_write(vmw_priv, SVGA_FIFO_PITCHLOCK, pitch);
	vmw_write(vmw_priv, SVGA_REG_WIDTH, width);
	vmw_write(vmw_priv, SVGA_REG_HEIGHT, height);
	if ((vmw_priv->capabilities & SVGA_CAP_8BIT_EMULATION) != 0)
		vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, bpp);

	if (vmw_read(vmw_priv, SVGA_REG_DEPTH) != depth) {
		DRM_ERROR("Invalid depth %u for %u bpp, host expects %u\n",
			  depth, bpp, vmw_read(vmw_priv, SVGA_REG_DEPTH));
		return -EINVAL;
	}

	return 0;
}
bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
				uint32_t pitch,
				uint32_t height)
{
	return ((u64) pitch * (u64) height) < (u64)
		((dev_priv->active_display_unit == vmw_du_screen_target) ?
		 dev_priv->max_primary_mem : dev_priv->vram_size);
}
/**
 * vmw_du_update_layout - Update the display unit with topology from resolution
 * plugin and generate DRM uevent
 * @dev_priv: device private
 * @num_rects: number of drm_rect in rects
 * @rects: topology to update
 */
static int vmw_du_update_layout(struct vmw_private *dev_priv,
				unsigned int num_rects, struct drm_rect *rects)
{
	struct drm_device *dev = &dev_priv->drm;
	struct vmw_display_unit *du;
	struct drm_connector *con;
	struct drm_connector_list_iter conn_iter;
	struct drm_modeset_acquire_ctx ctx;
	struct drm_crtc *crtc;
	int ret;

	/* Currently gui_x/y is protected with the crtc mutex */
	mutex_lock(&dev->mode_config.mutex);
	drm_modeset_acquire_init(&ctx, 0);
retry:
	drm_for_each_crtc(crtc, dev) {
		ret = drm_modeset_lock(&crtc->mutex, &ctx);
		if (ret < 0) {
			if (ret == -EDEADLK) {
				drm_modeset_backoff(&ctx);
				goto retry;
			}
			goto out_fini;
		}
	}

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(con, &conn_iter) {
		du = vmw_connector_to_du(con);
		if (num_rects > du->unit) {
			du->pref_width = drm_rect_width(&rects[du->unit]);
			du->pref_height = drm_rect_height(&rects[du->unit]);
			du->pref_active = true;
			du->gui_x = rects[du->unit].x1;
			du->gui_y = rects[du->unit].y1;
		} else {
			du->pref_width = VMWGFX_MIN_INITIAL_WIDTH;
			du->pref_height = VMWGFX_MIN_INITIAL_HEIGHT;
			du->pref_active = false;
			du->gui_x = 0;
			du->gui_y = 0;
		}
	}
	drm_connector_list_iter_end(&conn_iter);

	list_for_each_entry(con, &dev->mode_config.connector_list, head) {
		du = vmw_connector_to_du(con);
		if (num_rects > du->unit) {
			drm_object_property_set_value
			  (&con->base, dev->mode_config.suggested_x_property,
			   du->gui_x);
			drm_object_property_set_value
			  (&con->base, dev->mode_config.suggested_y_property,
			   du->gui_y);
		} else {
			drm_object_property_set_value
			  (&con->base, dev->mode_config.suggested_x_property,
			   0);
			drm_object_property_set_value
			  (&con->base, dev->mode_config.suggested_y_property,
			   0);
		}
		con->status = vmw_du_connector_detect(con, true);
	}
out_fini:
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
	mutex_unlock(&dev->mode_config.mutex);

	drm_sysfs_hotplug_event(dev);

	return 0;
}
int vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
			  u16 *r, u16 *g, u16 *b,
			  uint32_t size,
			  struct drm_modeset_acquire_ctx *ctx)
{
	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
	int i;

	for (i = 0; i < size; i++) {
		DRM_DEBUG("%d r/g/b = 0x%04x / 0x%04x / 0x%04x\n", i,
			  r[i], g[i], b[i]);
		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 0, r[i] >> 8);
		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 1, g[i] >> 8);
		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 2, b[i] >> 8);
	}

	return 0;
}
int vmw_du_connector_dpms(struct drm_connector *connector, int mode)
{
	return 0;
}
enum drm_connector_status
vmw_du_connector_detect(struct drm_connector *connector, bool force)
{
	uint32_t num_displays;
	struct drm_device *dev = connector->dev;
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_display_unit *du = vmw_connector_to_du(connector);

	num_displays = vmw_read(dev_priv, SVGA_REG_NUM_DISPLAYS);

	return ((vmw_connector_to_du(connector)->unit < num_displays &&
		 du->pref_active) ?
		connector_status_connected : connector_status_disconnected);
}
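/*
 * Built-in mode table used when filling connector modes: common fixed
 * timings (60Hz except the 1152x864@75Hz entry), terminated by an
 * all-zero mode.
 */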

static struct drm_display_mode vmw_kms_connector_builtin[] = {
	/* 640x480@60Hz */
	{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
		   752, 800, 0, 480, 489, 492, 525, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
	/* 800x600@60Hz */
	{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
		   968, 1056, 0, 600, 601, 605, 628, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1024x768@60Hz */
	{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
		   1184, 1344, 0, 768, 771, 777, 806, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
	/* 1152x864@75Hz */
	{ DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
		   1344, 1600, 0, 864, 865, 868, 900, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1280x720@60Hz */
	{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74500, 1280, 1344,
		   1472, 1664, 0, 720, 723, 728, 748, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1280x768@60Hz */
	{ DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344,
		   1472, 1664, 0, 768, 771, 778, 798, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1280x800@60Hz */
	{ DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352,
		   1480, 1680, 0, 800, 803, 809, 831, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
	/* 1280x960@60Hz */
	{ DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376,
		   1488, 1800, 0, 960, 961, 964, 1000, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1280x1024@60Hz */
	{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328,
		   1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1360x768@60Hz */
	{ DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424,
		   1536, 1792, 0, 768, 771, 777, 795, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1400x1050@60Hz */
	{ DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488,
		   1632, 1864, 0, 1050, 1053, 1057, 1089, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1440x900@60Hz */
	{ DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520,
		   1672, 1904, 0, 900, 903, 909, 934, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1600x1200@60Hz */
	{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664,
		   1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1680x1050@60Hz */
	{ DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784,
		   1960, 2240, 0, 1050, 1053, 1059, 1089, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1792x1344@60Hz */
	{ DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920,
		   2120, 2448, 0, 1344, 1345, 1348, 1394, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1856x1392@60Hz */
	{ DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952,
		   2176, 2528, 0, 1392, 1393, 1396, 1439, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1920x1080@60Hz */
	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 173000, 1920, 2048,
		   2248, 2576, 0, 1080, 1083, 1088, 1120, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1920x1200@60Hz */
	{ DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056,
		   2256, 2592, 0, 1200, 1203, 1209, 1245, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1920x1440@60Hz */
	{ DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048,
		   2256, 2600, 0, 1440, 1441, 1444, 1500, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 2560x1440@60Hz */
	{ DRM_MODE("2560x1440", DRM_MODE_TYPE_DRIVER, 241500, 2560, 2608,
		   2640, 2720, 0, 1440, 1443, 1448, 1481, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
	/* 2560x1600@60Hz */
	{ DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752,
		   3032, 3504, 0, 1600, 1603, 1609, 1658, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 2880x1800@60Hz */
	{ DRM_MODE("2880x1800", DRM_MODE_TYPE_DRIVER, 337500, 2880, 2928,
		   2960, 3040, 0, 1800, 1803, 1809, 1852, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
	/* 3840x2160@60Hz */
	{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 533000, 3840, 3888,
		   3920, 4000, 0, 2160, 2163, 2168, 2222, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
	/* 3840x2400@60Hz */
	{ DRM_MODE("3840x2400", DRM_MODE_TYPE_DRIVER, 592250, 3840, 3888,
		   3920, 4000, 0, 2400, 2403, 2409, 2469, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
	/* Terminate */
	{ DRM_MODE("", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) },
};
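
/*
 * For illustration: in the first entry above, DRM_MODE() packs a pixel
 * clock of 25175 kHz with htotal = 800 and vtotal = 525, so the refresh
 * rate works out as
 *
 *	25175000 / (800 * 525) ~= 59.94 Hz
 *
 * i.e. the classic VESA 640x480@60 timing. The same arithmetic recovers
 * the vrefresh noted in the comment of every entry in the table.
 */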

/**
 * vmw_guess_mode_timing - Provide fake timings for a
 * 60Hz vrefresh mode.
 *
 * @mode: Pointer to a struct drm_display_mode with hdisplay and vdisplay
 * members filled in.
 */
void vmw_guess_mode_timing(struct drm_display_mode *mode)
{
	mode->hsync_start = mode->hdisplay + 50;
	mode->hsync_end = mode->hsync_start + 50;
	mode->htotal = mode->hsync_end + 50;

	mode->vsync_start = mode->vdisplay + 50;
	mode->vsync_end = mode->vsync_start + 50;
	mode->vtotal = mode->vsync_end + 50;

	mode->clock = (u32)mode->htotal * (u32)mode->vtotal / 100 * 6;
}
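
/*
 * For illustration: with hdisplay = 1024 and vdisplay = 768, the helper
 * above produces htotal = 1174 and vtotal = 918, so
 *
 *	clock = 1174 * 918 / 100 * 6 = 64662 (kHz, integer division)
 *
 * and the resulting vertical refresh is
 *
 *	64662000 / (1174 * 918) ~= 60 Hz
 *
 * which is the 60Hz vrefresh the guess aims for.
 */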

int vmw_du_connector_fill_modes(struct drm_connector *connector,
				uint32_t max_width, uint32_t max_height)
{
	struct vmw_display_unit *du = vmw_connector_to_du(connector);
	struct drm_device *dev = connector->dev;
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_display_mode *mode = NULL;
	struct drm_display_mode *bmode;
	struct drm_display_mode prefmode = { DRM_MODE("preferred",
		DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
	};
	int i;
	u32 assumed_bpp = 4;

	if (dev_priv->assume_16bpp)
		assumed_bpp = 2;

	max_width = min(max_width, dev_priv->texture_max_width);
	max_height = min(max_height, dev_priv->texture_max_height);

	/*
	 * For STDU, the mode size is additionally limited by the
	 * SVGA_REG_SCREENTARGET_MAX_WIDTH/HEIGHT registers.
	 */
	if (dev_priv->active_display_unit == vmw_du_screen_target) {
		max_width = min(max_width, dev_priv->stdu_max_width);
		max_height = min(max_height, dev_priv->stdu_max_height);
	}

	/* Add preferred mode */
	mode = drm_mode_duplicate(dev, &prefmode);
	if (!mode)
		return 0;
	mode->hdisplay = du->pref_width;
	mode->vdisplay = du->pref_height;
	vmw_guess_mode_timing(mode);
	drm_mode_set_name(mode);

	if (vmw_kms_validate_mode_vram(dev_priv,
				       mode->hdisplay * assumed_bpp,
				       mode->vdisplay)) {
		drm_mode_probed_add(connector, mode);
	} else {
		drm_mode_destroy(dev, mode);
		mode = NULL;
	}

	if (du->pref_mode) {
		list_del_init(&du->pref_mode->head);
		drm_mode_destroy(dev, du->pref_mode);
	}

	/* mode might be null here, this is intended */
	du->pref_mode = mode;

	for (i = 0; vmw_kms_connector_builtin[i].type != 0; i++) {
		bmode = &vmw_kms_connector_builtin[i];
		if (bmode->hdisplay > max_width ||
		    bmode->vdisplay > max_height)
			continue;

		if (!vmw_kms_validate_mode_vram(dev_priv,
						bmode->hdisplay * assumed_bpp,
						bmode->vdisplay))
			continue;

		mode = drm_mode_duplicate(dev, bmode);
		if (!mode)
			return 0;

		drm_mode_probed_add(connector, mode);
	}

	drm_connector_list_update(connector);
	/* Move the preferred mode first, to help apps pick the right mode. */
	drm_mode_sort(&connector->modes);

	return 1;
}
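
/*
 * A minimal sketch of how the vmw_du_connector_* helpers above are wired
 * into a display unit's connector funcs (illustrative only; the struct
 * name is a hypothetical stand-in and the remaining fields are elided):
 *
 *	static const struct drm_connector_funcs example_connector_funcs = {
 *		.dpms = vmw_du_connector_dpms,
 *		.detect = vmw_du_connector_detect,
 *		.fill_modes = vmw_du_connector_fill_modes,
 *		...
 *	};
 */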

/**
 * vmw_kms_update_layout_ioctl - Handler for DRM_VMW_UPDATE_LAYOUT ioctl
 * @dev: drm device for the ioctl
 * @data: data pointer for the ioctl
 * @file_priv: drm file for the ioctl call
 *
 * Update the preferred topology of the display units as per the ioctl
 * request. The topology is expressed as an array of drm_vmw_rect, e.g.
 *
 * [0 0 640 480] [640 0 800 600] [0 480 640 480]
 *
 * NOTE:
 * The x and y offsets (upper left) in drm_vmw_rect cannot be less than 0.
 * Besides the device limit on topology, x + w and y + h (lower right) cannot
 * be greater than INT_MAX. A topology beyond these limits is rejected with
 * an error.
 *
 * Returns:
 * Zero on success, negative errno on failure.
 */
int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct drm_vmw_update_layout_arg *arg =
		(struct drm_vmw_update_layout_arg *)data;
	void __user *user_rects;
	struct drm_vmw_rect *rects;
	struct drm_rect *drm_rects;
	unsigned rects_size;
	int ret, i;

	if (!arg->num_outputs) {
		struct drm_rect def_rect = {0, 0,
					    VMWGFX_MIN_INITIAL_WIDTH,
					    VMWGFX_MIN_INITIAL_HEIGHT};
		vmw_du_update_layout(dev_priv, 1, &def_rect);
		return 0;
	}

	rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect);
	rects = kcalloc(arg->num_outputs, sizeof(struct drm_vmw_rect),
			GFP_KERNEL);
	if (unlikely(!rects))
		return -ENOMEM;

	user_rects = (void __user *)(unsigned long)arg->rects;
	ret = copy_from_user(rects, user_rects, rects_size);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to get rects.\n");
		ret = -EFAULT;
		goto out_free;
	}

	drm_rects = (struct drm_rect *)rects;

	VMW_DEBUG_KMS("Layout count = %u\n", arg->num_outputs);
	for (i = 0; i < arg->num_outputs; i++) {
		struct drm_vmw_rect curr_rect;

		/* Verify user-space input for overflow, as the kernel uses drm_rect */
		if ((rects[i].x + rects[i].w > INT_MAX) ||
		    (rects[i].y + rects[i].h > INT_MAX)) {
			ret = -ERANGE;
			goto out_free;
		}

		curr_rect = rects[i];
		drm_rects[i].x1 = curr_rect.x;
		drm_rects[i].y1 = curr_rect.y;
		drm_rects[i].x2 = curr_rect.x + curr_rect.w;
		drm_rects[i].y2 = curr_rect.y + curr_rect.h;

		VMW_DEBUG_KMS("  x1 = %d y1 = %d x2 = %d y2 = %d\n",
			      drm_rects[i].x1, drm_rects[i].y1,
			      drm_rects[i].x2, drm_rects[i].y2);

		/*
		 * Currently this check limits the topology to be within
		 * mode_config->max_* (which actually is the maximum texture
		 * size supported by the virtual device). This limit is here
		 * to address window managers that create a big framebuffer
		 * for the whole topology.
		 */
		if (drm_rects[i].x1 < 0 || drm_rects[i].y1 < 0 ||
		    drm_rects[i].x2 > mode_config->max_width ||
		    drm_rects[i].y2 > mode_config->max_height) {
			VMW_DEBUG_KMS("Invalid layout %d %d %d %d\n",
				      drm_rects[i].x1, drm_rects[i].y1,
				      drm_rects[i].x2, drm_rects[i].y2);
			ret = -EINVAL;
			goto out_free;
		}
	}

	ret = vmw_kms_check_display_memory(dev, arg->num_outputs, drm_rects);
	if (ret == 0)
		vmw_du_update_layout(dev_priv, arg->num_outputs, drm_rects);

out_free:
	kfree(rects);
	return ret;
}
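
/*
 * For illustration, a two-output side-by-side topology as user space
 * would pass it in (hypothetical values, assuming they fit within the
 * mode_config->max_* limits checked above):
 *
 *	struct drm_vmw_rect rects[] = {
 *		{ .x = 0,    .y = 0, .w = 1920, .h = 1080 },
 *		{ .x = 1920, .y = 0, .w = 1280, .h = 1024 },
 *	};
 *
 * Both rects pass the overflow check, so the layout is handed on to
 * vmw_kms_check_display_memory() and then vmw_du_update_layout().
 */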

/**
 * vmw_kms_helper_dirty - Helper to build commands and perform actions based
 * on a set of cliprects and a set of display units.
 *
 * @dev_priv: Pointer to a device private structure.
 * @framebuffer: Pointer to the framebuffer on which to perform the actions.
 * @clips: A set of struct drm_clip_rect. Either this or @vclips must be NULL.
 * Cliprects are given in framebuffer coordinates.
 * @vclips: A set of struct drm_vmw_rect cliprects. Either this or @clips must
 * be NULL. Cliprects are given in source coordinates.
 * @dest_x: X coordinate offset for the crtc / destination clip rects.
 * @dest_y: Y coordinate offset for the crtc / destination clip rects.
 * @num_clips: Number of cliprects in the @clips or @vclips array.
 * @increment: Integer with which to increment the clip counter when looping.
 * Used to skip a predetermined number of clip rects.
 * @dirty: Closure structure. See the description of struct vmw_kms_dirty.
 */
int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
			 struct vmw_framebuffer *framebuffer,
			 const struct drm_clip_rect *clips,
			 const struct drm_vmw_rect *vclips,
			 s32 dest_x, s32 dest_y,
			 int num_clips,
			 int increment,
			 struct vmw_kms_dirty *dirty)
{
	struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
	struct drm_crtc *crtc;
	u32 num_units = 0;
	u32 i, k;

	dirty->dev_priv = dev_priv;

	/* If crtc is passed, no need to iterate over other display units */
	if (dirty->crtc) {
		units[num_units++] = vmw_crtc_to_du(dirty->crtc);
	} else {
		list_for_each_entry(crtc, &dev_priv->drm.mode_config.crtc_list,
				    head) {
			struct drm_plane *plane = crtc->primary;

			if (plane->state->fb == &framebuffer->base)
				units[num_units++] = vmw_crtc_to_du(crtc);
		}
	}

	for (k = 0; k < num_units; k++) {
		struct vmw_display_unit *unit = units[k];
		s32 crtc_x = unit->crtc.x;
		s32 crtc_y = unit->crtc.y;
		s32 crtc_width = unit->crtc.mode.hdisplay;
		s32 crtc_height = unit->crtc.mode.vdisplay;
		const struct drm_clip_rect *clips_ptr = clips;
		const struct drm_vmw_rect *vclips_ptr = vclips;

		dirty->unit = unit;
		if (dirty->fifo_reserve_size > 0) {
			dirty->cmd = VMW_CMD_RESERVE(dev_priv,
						     dirty->fifo_reserve_size);
			if (!dirty->cmd)
				return -ENOMEM;

			memset(dirty->cmd, 0, dirty->fifo_reserve_size);
		}
		dirty->num_hits = 0;
		for (i = 0; i < num_clips; i++, clips_ptr += increment,
		     vclips_ptr += increment) {
			s32 clip_left;
			s32 clip_top;

			/*
			 * Select clip array type. Note that integer type
			 * in @clips is unsigned short, whereas in @vclips
			 * it's 32-bit.
			 */
			if (clips) {
				dirty->fb_x = (s32) clips_ptr->x1;
				dirty->fb_y = (s32) clips_ptr->y1;
				dirty->unit_x2 = (s32) clips_ptr->x2 + dest_x -
					crtc_x;
				dirty->unit_y2 = (s32) clips_ptr->y2 + dest_y -
					crtc_y;
			} else {
				dirty->fb_x = vclips_ptr->x;
				dirty->fb_y = vclips_ptr->y;
				dirty->unit_x2 = dirty->fb_x + vclips_ptr->w +
					dest_x - crtc_x;
				dirty->unit_y2 = dirty->fb_y + vclips_ptr->h +
					dest_y - crtc_y;
			}

			dirty->unit_x1 = dirty->fb_x + dest_x - crtc_x;
			dirty->unit_y1 = dirty->fb_y + dest_y - crtc_y;

			/* Skip this clip if it's outside the crtc region */
			if (dirty->unit_x1 >= crtc_width ||
			    dirty->unit_y1 >= crtc_height ||
			    dirty->unit_x2 <= 0 || dirty->unit_y2 <= 0)
				continue;

			/* Clip right and bottom to crtc limits */
			dirty->unit_x2 = min_t(s32, dirty->unit_x2,
					       crtc_width);
			dirty->unit_y2 = min_t(s32, dirty->unit_y2,
					       crtc_height);

			/* Clip left and top to crtc limits */
			clip_left = min_t(s32, dirty->unit_x1, 0);
			clip_top = min_t(s32, dirty->unit_y1, 0);
			dirty->unit_x1 -= clip_left;
			dirty->unit_y1 -= clip_top;
			dirty->fb_x -= clip_left;
			dirty->fb_y -= clip_top;

			dirty->clip(dirty);
		}

		dirty->fifo_commit(dirty);
	}

	return 0;
}
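
/*
 * Worked example of the clip translation above: assume a crtc at
 * (crtc_x, crtc_y) = (640, 0) in a 640x480 mode, dest_x = dest_y = 0 and
 * a framebuffer clip of (x1, y1, x2, y2) = (600, 100, 700, 200). Then
 *
 *	unit_x1 = 600 - 640 = -40	unit_x2 = 700 - 640 = 60
 *
 * so the clip straddles this unit's left edge; clip_left = -40, and the
 * code clamps unit_x1 to 0 while shifting fb_x right by the same 40
 * pixels, leaving a 60x100 region to emit for this display unit.
 */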

/**
 * vmw_kms_helper_validation_finish - Helper for post KMS command submission
 * cleanup and fencing
 * @dev_priv: Pointer to the device-private struct
 * @file_priv: Pointer identifying the client when user-space fencing is used
 * @ctx: Pointer to the validation context
 * @out_fence: If non-NULL, returned refcounted fence-pointer
 * @user_fence_rep: If non-NULL, pointer to user-space address area
 * in which to copy user-space fence info
 */
void vmw_kms_helper_validation_finish(struct vmw_private *dev_priv,
				      struct drm_file *file_priv,
				      struct vmw_validation_context *ctx,
				      struct vmw_fence_obj **out_fence,
				      struct drm_vmw_fence_rep __user *
				      user_fence_rep)
{
	struct vmw_fence_obj *fence = NULL;
	uint32_t handle = 0;
	int ret = 0;

	if (file_priv || user_fence_rep || vmw_validation_has_bos(ctx) ||
	    out_fence)
		ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
						 file_priv ? &handle : NULL);
	vmw_validation_done(ctx, fence);
	if (file_priv)
		vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv),
					    ret, user_fence_rep, fence,
					    handle, -1);
	if (out_fence)
		*out_fence = fence;
	else
		vmw_fence_obj_unreference(&fence);
}

/**
 * vmw_kms_update_proxy - Helper function to update a proxy surface from
 * its backing MOB.
 *
 * @res: Pointer to the surface resource
 * @clips: Clip rects in framebuffer (surface) space.
 * @num_clips: Number of clips in @clips.
 * @increment: Integer with which to increment the clip counter when looping.
 * Used to skip a predetermined number of clip rects.
 *
 * This function makes sure the proxy surface is updated from its backing MOB
 * using the region given by @clips. The surface resource @res and its backing
 * MOB need to be reserved and validated on call.
 */
int vmw_kms_update_proxy(struct vmw_resource *res,
			 const struct drm_clip_rect *clips,
			 unsigned num_clips,
			 int increment)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct drm_vmw_size *size = &vmw_res_to_srf(res)->metadata.base_size;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdUpdateGBImage body;
	} *cmd;
	SVGA3dBox *box;
	size_t copy_size = 0;
	int i;

	if (!clips)
		return 0;

	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd) * num_clips);
	if (!cmd)
		return -ENOMEM;

	for (i = 0; i < num_clips; ++i, clips += increment, ++cmd) {
		box = &cmd->body.box;

		cmd->header.id = SVGA_3D_CMD_UPDATE_GB_IMAGE;
		cmd->header.size = sizeof(cmd->body);
		cmd->body.image.sid = res->id;
		cmd->body.image.face = 0;
		cmd->body.image.mipmap = 0;

		if (clips->x1 > size->width || clips->x2 > size->width ||
		    clips->y1 > size->height || clips->y2 > size->height) {
			DRM_ERROR("Invalid clips outside of framebuffer.\n");
			return -EINVAL;
		}

		box->x = clips->x1;
		box->y = clips->y1;
		box->z = 0;
		box->w = clips->x2 - clips->x1;
		box->h = clips->y2 - clips->y1;
		box->d = 1;

		copy_size += sizeof(*cmd);
	}

	vmw_cmd_commit(dev_priv, copy_size);

	return 0;
}
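
/*
 * For illustration: a clip rect of (x1, y1, x2, y2) = (16, 32, 48, 64)
 * on the proxy surface becomes a single SVGA_3D_CMD_UPDATE_GB_IMAGE with
 *
 *	box = { .x = 16, .y = 32, .z = 0, .w = 32, .h = 32, .d = 1 }
 *
 * asking the device to refresh that 32x32 region of the surface from the
 * contents of its guest-backed MOB.
 */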

/**
 * vmw_kms_create_implicit_placement_property - Set up the implicit placement
 * property
 *
 * @dev_priv: Pointer to a device private struct.
 *
 * Sets up the implicit placement property unless it's already set up.
 */
void
vmw_kms_create_implicit_placement_property(struct vmw_private *dev_priv)
{
	if (dev_priv->implicit_placement_property)
		return;

	dev_priv->implicit_placement_property =
		drm_property_create_range(&dev_priv->drm,
					  DRM_MODE_PROP_IMMUTABLE,
					  "implicit_placement", 0, 1);
}

/**
 * vmw_kms_suspend - Save modesetting state and turn modesetting off.
 *
 * @dev: Pointer to the drm device
 * Return: 0 on success. Negative error code on failure.
 */
int vmw_kms_suspend(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);

	dev_priv->suspend_state = drm_atomic_helper_suspend(dev);
	if (IS_ERR(dev_priv->suspend_state)) {
		int ret = PTR_ERR(dev_priv->suspend_state);

		DRM_ERROR("Failed kms suspend: %d\n", ret);
		dev_priv->suspend_state = NULL;

		return ret;
	}

	return 0;
}

/**
 * vmw_kms_resume - Re-enable modesetting and restore state
 *
 * @dev: Pointer to the drm device
 * Return: 0 on success. Negative error code on failure.
 *
 * State is resumed from a previous vmw_kms_suspend(). It's illegal
 * to call this function without a previous vmw_kms_suspend().
 */
int vmw_kms_resume(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	int ret;

	if (WARN_ON(!dev_priv->suspend_state))
		return 0;

	ret = drm_atomic_helper_resume(dev, dev_priv->suspend_state);
	dev_priv->suspend_state = NULL;

	return ret;
}
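
/*
 * A minimal sketch of the intended pairing, e.g. from the driver's PM
 * freeze/restore paths (illustrative only):
 *
 *	ret = vmw_kms_suspend(&dev_priv->drm);
 *	...power the device down and back up...
 *	vmw_kms_resume(&dev_priv->drm);
 */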

/**
 * vmw_kms_lost_device - Notify kms that modesetting capabilities will be lost
 *
 * @dev: Pointer to the drm device
 */
void vmw_kms_lost_device(struct drm_device *dev)
{
	drm_atomic_helper_shutdown(dev);
}

/**
 * vmw_du_helper_plane_update - Helper to do plane update on a display unit.
 * @update: The closure structure.
 *
 * Call this helper after setting callbacks in &vmw_du_update_plane to do plane
 * update on display unit.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int vmw_du_helper_plane_update(struct vmw_du_update_plane *update)
{
	struct drm_plane_state *state = update->plane->state;
	struct drm_plane_state *old_state = update->old_state;
	struct drm_atomic_helper_damage_iter iter;
	struct drm_rect clip;
	struct drm_rect bb;
	DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
	uint32_t reserved_size = 0;
	uint32_t submit_size = 0;
	uint32_t curr_size = 0;
	uint32_t num_hits = 0;
	void *cmd_start;
	char *cmd_next;
	int ret;

	/*
	 * Iterate in advance to check if the plane update is really needed
	 * and to find the number of clips that actually fall within the
	 * plane src, for fifo allocation.
	 */
	drm_atomic_helper_damage_iter_init(&iter, old_state, state);
	drm_atomic_for_each_plane_damage(&iter, &clip)
		num_hits++;

	if (num_hits == 0)
		return 0;

	if (update->vfb->bo) {
		struct vmw_framebuffer_bo *vfbbo =
			container_of(update->vfb, typeof(*vfbbo), base);

		/*
		 * For screen targets we want a mappable bo, for everything
		 * else we want an accelerated, i.e. host backed (vram or gmr),
		 * bo. If the display unit is not a screen target, MOBs
		 * shouldn't be available.
		 */
		if (update->dev_priv->active_display_unit == vmw_du_screen_target) {
			vmw_bo_placement_set(vfbbo->buffer,
					     VMW_BO_DOMAIN_SYS | VMW_BO_DOMAIN_MOB | VMW_BO_DOMAIN_GMR,
					     VMW_BO_DOMAIN_SYS | VMW_BO_DOMAIN_MOB | VMW_BO_DOMAIN_GMR);
		} else {
			WARN_ON(update->dev_priv->has_mob);
			vmw_bo_placement_set_default_accelerated(vfbbo->buffer);
		}
		ret = vmw_validation_add_bo(&val_ctx, vfbbo->buffer);
	} else {
		struct vmw_framebuffer_surface *vfbs =
			container_of(update->vfb, typeof(*vfbs), base);

		ret = vmw_validation_add_resource(&val_ctx, &vfbs->surface->res,
						  0, VMW_RES_DIRTY_NONE, NULL,
						  NULL);
	}

	if (ret)
		return ret;

	ret = vmw_validation_prepare(&val_ctx, update->mutex, update->intr);
	if (ret)
		goto out_unref;

	reserved_size = update->calc_fifo_size(update, num_hits);
	cmd_start = VMW_CMD_RESERVE(update->dev_priv, reserved_size);
	if (!cmd_start) {
		ret = -ENOMEM;
		goto out_revert;
	}

	cmd_next = cmd_start;

	if (update->post_prepare) {
		curr_size = update->post_prepare(update, cmd_next);
		cmd_next += curr_size;
		submit_size += curr_size;
	}

	if (update->pre_clip) {
		curr_size = update->pre_clip(update, cmd_next, num_hits);
		cmd_next += curr_size;
		submit_size += curr_size;
	}

	bb.x1 = INT_MAX;
	bb.y1 = INT_MAX;
	bb.x2 = INT_MIN;
	bb.y2 = INT_MIN;

	drm_atomic_helper_damage_iter_init(&iter, old_state, state);
	drm_atomic_for_each_plane_damage(&iter, &clip) {
		uint32_t fb_x = clip.x1;
		uint32_t fb_y = clip.y1;

		vmw_du_translate_to_crtc(state, &clip);
		if (update->clip) {
			curr_size = update->clip(update, cmd_next, &clip, fb_x,
						 fb_y);
			cmd_next += curr_size;
			submit_size += curr_size;
		}
		bb.x1 = min_t(int, bb.x1, clip.x1);
		bb.y1 = min_t(int, bb.y1, clip.y1);
		bb.x2 = max_t(int, bb.x2, clip.x2);
		bb.y2 = max_t(int, bb.y2, clip.y2);
	}

	curr_size = update->post_clip(update, cmd_next, &bb);
	submit_size += curr_size;

	if (reserved_size < submit_size)
		submit_size = 0;

	vmw_cmd_commit(update->dev_priv, submit_size);

	vmw_kms_helper_validation_finish(update->dev_priv, NULL, &val_ctx,
					 update->out_fence, NULL);
	return ret;

out_revert:
	vmw_validation_revert(&val_ctx);

out_unref:
	vmw_validation_unref_lists(&val_ctx);
	return ret;
}
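
/*
 * A minimal sketch of how a display unit drives this helper; the
 * example_*() callbacks are hypothetical stand-ins for a unit's fifo-size,
 * per-clip and post-clip emitters:
 *
 *	struct vmw_du_update_plane update = {
 *		.plane = plane,
 *		.old_state = old_state,
 *		.dev_priv = dev_priv,
 *		.vfb = vmw_framebuffer_to_vfb(plane->state->fb),
 *		.mutex = NULL,
 *		.intr = true,
 *		.calc_fifo_size = example_calc_fifo_size,
 *		.clip = example_emit_clip,
 *		.post_clip = example_emit_post_clip,
 *	};
 *
 *	ret = vmw_du_helper_plane_update(&update);
 */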