1 // SPDX-License-Identifier: GPL-2.0 OR MIT
2 /**************************************************************************
4 * Copyright (c) 2009-2024 Broadcom. All Rights Reserved. The term
5 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the
9 * "Software"), to deal in the Software without restriction, including
10 * without limitation the rights to use, copy, modify, merge, publish,
11 * distribute, sub license, and/or sell copies of the Software, and to
12 * permit persons to whom the Software is furnished to do so, subject to
13 * the following conditions:
15 * The above copyright notice and this permission notice (including the
16 * next paragraph) shall be included in all copies or substantial portions of the Software.
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
22 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
23 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
24 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
25 * USE OR OTHER DEALINGS IN THE SOFTWARE.
27 **************************************************************************/
28 #include "vmwgfx_kms.h"
30 #include "vmwgfx_bo.h"
31 #include "vmwgfx_vkms.h"
32 #include "vmw_surface_cache.h"
34 #include <drm/drm_atomic.h>
35 #include <drm/drm_atomic_helper.h>
36 #include <drm/drm_damage_helper.h>
37 #include <drm/drm_fourcc.h>
38 #include <drm/drm_rect.h>
39 #include <drm/drm_sysfs.h>
40 #include <drm/drm_edid.h>
42 void vmw_du_init(struct vmw_display_unit *du)
44 vmw_vkms_crtc_init(&du->crtc);
47 void vmw_du_cleanup(struct vmw_display_unit *du)
49 struct vmw_private *dev_priv = vmw_priv(du->primary.dev);
51 vmw_vkms_crtc_cleanup(&du->crtc);
52 drm_plane_cleanup(&du->primary);
53 if (vmw_cmd_supported(dev_priv))
54 drm_plane_cleanup(&du->cursor.base);
56 drm_connector_unregister(&du->connector);
57 drm_crtc_cleanup(&du->crtc);
58 drm_encoder_cleanup(&du->encoder);
59 drm_connector_cleanup(&du->connector);
63 * Display Unit Cursor functions
66 static int vmw_du_cursor_plane_unmap_cm(struct vmw_plane_state *vps);
67 static void vmw_cursor_update_mob(struct vmw_private *dev_priv,
68 struct vmw_plane_state *vps,
69 u32 *image, u32 width, u32 height,
70 u32 hotspotX, u32 hotspotY);
72 struct vmw_svga_fifo_cmd_define_cursor {
74 SVGAFifoCmdDefineAlphaCursor cursor;
78 * vmw_send_define_cursor_cmd - queue a define cursor command
79 * @dev_priv: the private driver struct
80 * @image: buffer which holds the cursor image
81 * @width: width of the mouse cursor image
82 * @height: height of the mouse cursor image
83 * @hotspotX: the horizontal position of mouse hotspot
84 * @hotspotY: the vertical position of mouse hotspot
86 static void vmw_send_define_cursor_cmd(struct vmw_private *dev_priv,
87 u32 *image, u32 width, u32 height,
88 u32 hotspotX, u32 hotspotY)
90 struct vmw_svga_fifo_cmd_define_cursor *cmd;
91 const u32 image_size = width * height * sizeof(*image);
92 const u32 cmd_size = sizeof(*cmd) + image_size;
94 /* Try to reserve fifocmd space and swallow any failures;
95 such reservations cannot be left unconsumed for long
96 at the risk of clogging other fifocmd users, so
97 we treat reservations separately from the way we treat
98 other fallible KMS-atomic resources at prepare_fb */
99 cmd = VMW_CMD_RESERVE(dev_priv, cmd_size);
104 memset(cmd, 0, sizeof(*cmd));
106 memcpy(&cmd[1], image, image_size);
108 cmd->cmd = SVGA_CMD_DEFINE_ALPHA_CURSOR;
110 cmd->cursor.width = width;
111 cmd->cursor.height = height;
112 cmd->cursor.hotspotX = hotspotX;
113 cmd->cursor.hotspotY = hotspotY;
115 vmw_cmd_commit_flush(dev_priv, cmd_size);
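/*
 * Layout sketch (illustrative, not driver code): the reserved command is
 * the fixed header immediately followed by the raw ARGB pixels. For a
 * hypothetical 64x64 cursor:
 *
 *	image_size = 64 * 64 * sizeof(u32);	// 16384 bytes of pixels
 *	cmd_size   = sizeof(*cmd) + image_size;	// header + inline payload
 *
 * The memcpy to &cmd[1] above works because &cmd[1] is the first byte
 * past the command struct, which is where the device expects the pixel
 * data to start.
 */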
119 * vmw_cursor_update_image - update the cursor image on the provided plane
120 * @dev_priv: the private driver struct
121 * @vps: the plane state of the cursor plane
122 * @image: buffer which holds the cursor image
123 * @width: width of the mouse cursor image
124 * @height: height of the mouse cursor image
125 * @hotspotX: the horizontal position of mouse hotspot
126 * @hotspotY: the vertical position of mouse hotspot
128 static void vmw_cursor_update_image(struct vmw_private *dev_priv,
129 struct vmw_plane_state *vps,
130 u32 *image, u32 width, u32 height,
131 u32 hotspotX, u32 hotspotY)
134 vmw_cursor_update_mob(dev_priv, vps, image,
135 vps->base.crtc_w, vps->base.crtc_h,
139 vmw_send_define_cursor_cmd(dev_priv, image, width, height,
145 * vmw_cursor_update_mob - Update cursor via the CursorMob mechanism
147 * Called from inside vmw_du_cursor_plane_atomic_update to actually
148 * make the cursor-image live.
150 * @dev_priv: device to work with
151 * @vps: the plane state of the cursor plane
152 * @image: cursor source data to fill the MOB with
153 * @width: source data width
154 * @height: source data height
155 * @hotspotX: cursor hotspot x
156 * @hotspotY: cursor hotspot y
158 static void vmw_cursor_update_mob(struct vmw_private *dev_priv,
159 struct vmw_plane_state *vps,
160 u32 *image, u32 width, u32 height,
161 u32 hotspotX, u32 hotspotY)
163 SVGAGBCursorHeader *header;
164 SVGAGBAlphaCursorHeader *alpha_header;
165 const u32 image_size = width * height * sizeof(*image);
167 header = vmw_bo_map_and_cache(vps->cursor.bo);
168 alpha_header = &header->header.alphaHeader;
170 memset(header, 0, sizeof(*header));
172 header->type = SVGA_ALPHA_CURSOR;
173 header->sizeInBytes = image_size;
175 alpha_header->hotspotX = hotspotX;
176 alpha_header->hotspotY = hotspotY;
177 alpha_header->width = width;
178 alpha_header->height = height;
180 memcpy(header + 1, image, image_size);
181 vmw_write(dev_priv, SVGA_REG_CURSOR_MOBID,
182 vps->cursor.bo->tbo.resource->start);
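/*
 * For illustration: the MOB backing store mirrors the FIFO layout above,
 * an SVGAGBCursorHeader followed directly by width * height u32 ARGB
 * pixels. Writing SVGA_REG_CURSOR_MOBID afterwards points the device at
 * this MOB as the current cursor source.
 */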
186 static u32 vmw_du_cursor_mob_size(u32 w, u32 h)
188 return w * h * sizeof(u32) + sizeof(SVGAGBCursorHeader);
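/*
 * Worked example (hypothetical 64x64 cursor): 64 * 64 * sizeof(u32)
 * = 16 KiB of pixel data plus sizeof(SVGAGBCursorHeader), so the MOB
 * is a little over 16 KiB.
 */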
192 * vmw_du_cursor_plane_acquire_image - Acquire the image data
193 * @vps: cursor plane state
195 static u32 *vmw_du_cursor_plane_acquire_image(struct vmw_plane_state *vps)
197 struct vmw_surface *surf;
199 if (vmw_user_object_is_null(&vps->uo))
202 surf = vmw_user_object_surface(&vps->uo);
203 if (surf && !vmw_user_object_is_mapped(&vps->uo))
204 return surf->snooper.image;
206 return vmw_user_object_map(&vps->uo);
209 static bool vmw_du_cursor_plane_has_changed(struct vmw_plane_state *old_vps,
210 struct vmw_plane_state *new_vps)
217 if (old_vps->base.crtc_w != new_vps->base.crtc_w ||
218 old_vps->base.crtc_h != new_vps->base.crtc_h)
221 if (old_vps->cursor.hotspot_x != new_vps->cursor.hotspot_x ||
222 old_vps->cursor.hotspot_y != new_vps->cursor.hotspot_y)
225 size = new_vps->base.crtc_w * new_vps->base.crtc_h * sizeof(u32);
227 old_image = vmw_du_cursor_plane_acquire_image(old_vps);
228 new_image = vmw_du_cursor_plane_acquire_image(new_vps);
231 if (old_image && new_image && old_image != new_image)
232 changed = memcmp(old_image, new_image, size) != 0;
237 static void vmw_du_destroy_cursor_mob(struct vmw_bo **vbo)
242 ttm_bo_unpin(&(*vbo)->tbo);
243 vmw_bo_unreference(vbo);
246 static void vmw_du_put_cursor_mob(struct vmw_cursor_plane *vcp,
247 struct vmw_plane_state *vps)
254 vmw_du_cursor_plane_unmap_cm(vps);
256 /* Look for a free slot to return this mob to the cache. */
257 for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
258 if (!vcp->cursor_mobs[i]) {
259 vcp->cursor_mobs[i] = vps->cursor.bo;
260 vps->cursor.bo = NULL;
265 /* Cache is full: See if this mob is bigger than an existing mob. */
266 for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
267 if (vcp->cursor_mobs[i]->tbo.base.size <
268 vps->cursor.bo->tbo.base.size) {
269 vmw_du_destroy_cursor_mob(&vcp->cursor_mobs[i]);
270 vcp->cursor_mobs[i] = vps->cursor.bo;
271 vps->cursor.bo = NULL;
276 /* Destroy it if it's not worth caching. */
277 vmw_du_destroy_cursor_mob(&vps->cursor.bo);
280 static int vmw_du_get_cursor_mob(struct vmw_cursor_plane *vcp,
281 struct vmw_plane_state *vps)
283 struct vmw_private *dev_priv = vmw_priv(vcp->base.dev);
284 u32 size = vmw_du_cursor_mob_size(vps->base.crtc_w, vps->base.crtc_h);
286 u32 cursor_max_dim, mob_max_size;
287 struct vmw_fence_obj *fence = NULL;
290 if (!dev_priv->has_mob ||
291 (dev_priv->capabilities2 & SVGA_CAP2_CURSOR_MOB) == 0)
294 mob_max_size = vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
295 cursor_max_dim = vmw_read(dev_priv, SVGA_REG_CURSOR_MAX_DIMENSION);
297 if (size > mob_max_size || vps->base.crtc_w > cursor_max_dim ||
298 vps->base.crtc_h > cursor_max_dim)
301 if (vps->cursor.bo) {
302 if (vps->cursor.bo->tbo.base.size >= size)
304 vmw_du_put_cursor_mob(vcp, vps);
307 /* Look for an unused mob in the cache. */
308 for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
309 if (vcp->cursor_mobs[i] &&
310 vcp->cursor_mobs[i]->tbo.base.size >= size) {
311 vps->cursor.bo = vcp->cursor_mobs[i];
312 vcp->cursor_mobs[i] = NULL;
316 /* Create a new mob if we can't find an existing one. */
317 ret = vmw_bo_create_and_populate(dev_priv, size,
324 /* Fence the mob creation so we are guaranteed to have the mob */
325 ret = ttm_bo_reserve(&vps->cursor.bo->tbo, false, false, NULL);
329 ret = vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
331 ttm_bo_unreserve(&vps->cursor.bo->tbo);
335 dma_fence_wait(&fence->base, false);
336 dma_fence_put(&fence->base);
338 ttm_bo_unreserve(&vps->cursor.bo->tbo);
342 vmw_du_destroy_cursor_mob(&vps->cursor.bo);
347 static void vmw_cursor_update_position(struct vmw_private *dev_priv,
348 bool show, int x, int y)
350 const uint32_t svga_cursor_on = show ? SVGA_CURSOR_ON_SHOW
351 : SVGA_CURSOR_ON_HIDE;
354 spin_lock(&dev_priv->cursor_lock);
355 if (dev_priv->capabilities2 & SVGA_CAP2_EXTRA_REGS) {
356 vmw_write(dev_priv, SVGA_REG_CURSOR4_X, x);
357 vmw_write(dev_priv, SVGA_REG_CURSOR4_Y, y);
358 vmw_write(dev_priv, SVGA_REG_CURSOR4_SCREEN_ID, SVGA3D_INVALID_ID);
359 vmw_write(dev_priv, SVGA_REG_CURSOR4_ON, svga_cursor_on);
360 vmw_write(dev_priv, SVGA_REG_CURSOR4_SUBMIT, 1);
361 } else if (vmw_is_cursor_bypass3_enabled(dev_priv)) {
362 vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_ON, svga_cursor_on);
363 vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_X, x);
364 vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_Y, y);
365 count = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_CURSOR_COUNT);
366 vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_COUNT, ++count);
368 vmw_write(dev_priv, SVGA_REG_CURSOR_X, x);
369 vmw_write(dev_priv, SVGA_REG_CURSOR_Y, y);
370 vmw_write(dev_priv, SVGA_REG_CURSOR_ON, svga_cursor_on);
372 spin_unlock(&dev_priv->cursor_lock);
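/*
 * Minimal usage sketch (illustrative only): callers pass device
 * coordinates that already include the hotspot offset, e.g.
 *
 *	vmw_cursor_update_position(dev_priv, true,
 *				   du->cursor_x + hotspot_x,
 *				   du->cursor_y + hotspot_y);
 *
 * The branches above pick, in order of preference, the CURSOR4
 * registers, the cursor bypass 3 FIFO registers, or the legacy cursor
 * registers.
 */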
375 void vmw_kms_cursor_snoop(struct vmw_surface *srf,
376 struct ttm_object_file *tfile,
377 struct ttm_buffer_object *bo,
378 SVGA3dCmdHeader *header)
380 struct ttm_bo_kmap_obj map;
381 unsigned long kmap_offset;
382 unsigned long kmap_num;
388 SVGA3dCmdHeader header;
389 SVGA3dCmdSurfaceDMA dma;
392 const struct SVGA3dSurfaceDesc *desc =
393 vmw_surface_get_desc(VMW_CURSOR_SNOOP_FORMAT);
394 const u32 image_pitch = VMW_CURSOR_SNOOP_WIDTH * desc->pitchBytesPerBlock;
396 cmd = container_of(header, struct vmw_dma_cmd, header);
398 /* No snooper installed, nothing to copy */
399 if (!srf->snooper.image)
402 if (cmd->dma.host.face != 0 || cmd->dma.host.mipmap != 0) {
403 DRM_ERROR("face and mipmap for cursors should never != 0\n");
407 if (cmd->header.size < 64) {
408 DRM_ERROR("at least one full copy box must be given\n");
412 box = (SVGA3dCopyBox *)&cmd[1];
413 box_count = (cmd->header.size - sizeof(SVGA3dCmdSurfaceDMA)) /
414 sizeof(SVGA3dCopyBox);
416 if (cmd->dma.guest.ptr.offset % PAGE_SIZE ||
417 box->x != 0 || box->y != 0 || box->z != 0 ||
418 box->srcx != 0 || box->srcy != 0 || box->srcz != 0 ||
419 box->d != 1 || box_count != 1 ||
420 box->w > VMW_CURSOR_SNOOP_WIDTH || box->h > VMW_CURSOR_SNOOP_HEIGHT) {
421 /* TODO handle non-page-aligned offsets */
422 /* TODO handle nonzero dst & src offsets */
423 /* TODO handle more than one copy box */
424 DRM_ERROR("Can't snoop dma request for cursor!\n");
425 DRM_ERROR("(%u, %u, %u) (%u, %u, %u) (%ux%ux%u) %u %u\n",
426 box->srcx, box->srcy, box->srcz,
427 box->x, box->y, box->z,
428 box->w, box->h, box->d, box_count,
429 cmd->dma.guest.ptr.offset);
433 kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT;
434 kmap_num = (VMW_CURSOR_SNOOP_HEIGHT*image_pitch) >> PAGE_SHIFT;
436 ret = ttm_bo_reserve(bo, true, false, NULL);
437 if (unlikely(ret != 0)) {
438 DRM_ERROR("reserve failed\n");
442 ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
443 if (unlikely(ret != 0))
446 virtual = ttm_kmap_obj_virtual(&map, &is_iomem);
448 if (box->w == VMW_CURSOR_SNOOP_WIDTH && cmd->dma.guest.pitch == image_pitch) {
449 memcpy(srf->snooper.image, virtual,
450 VMW_CURSOR_SNOOP_HEIGHT*image_pitch);
452 /* Pitches differ, so copy the image row by row. */
453 for (i = 0; i < box->h; i++)
454 memcpy(srf->snooper.image + i * image_pitch,
455 virtual + i * cmd->dma.guest.pitch,
456 box->w * desc->pitchBytesPerBlock);
463 ttm_bo_unreserve(bo);
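/*
 * Pitch math, for illustration and assuming the usual 64x64 ARGB snoop
 * format (4 bytes per pixel block): image_pitch = 64 * 4 = 256 bytes,
 * so the fast path copies VMW_CURSOR_SNOOP_HEIGHT * 256 bytes in one
 * memcpy, kmap_num = (64 * 256) >> PAGE_SHIFT = 4 pages, and the
 * row-by-row path handles a guest pitch that differs from image_pitch.
 */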
467 * vmw_kms_legacy_hotspot_clear - Clear legacy hotspots
469 * @dev_priv: Pointer to the device private struct.
471 * Clears all legacy hotspots.
473 void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv)
475 struct drm_device *dev = &dev_priv->drm;
476 struct vmw_display_unit *du;
477 struct drm_crtc *crtc;
479 drm_modeset_lock_all(dev);
480 drm_for_each_crtc(crtc, dev) {
481 du = vmw_crtc_to_du(crtc);
486 drm_modeset_unlock_all(dev);
489 void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
491 struct drm_device *dev = &dev_priv->drm;
492 struct vmw_display_unit *du;
493 struct drm_crtc *crtc;
495 mutex_lock(&dev->mode_config.mutex);
497 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
498 du = vmw_crtc_to_du(crtc);
499 if (!du->cursor_surface ||
500 du->cursor_age == du->cursor_surface->snooper.age ||
501 !du->cursor_surface->snooper.image)
504 du->cursor_age = du->cursor_surface->snooper.age;
505 vmw_send_define_cursor_cmd(dev_priv,
506 du->cursor_surface->snooper.image,
507 VMW_CURSOR_SNOOP_WIDTH,
508 VMW_CURSOR_SNOOP_HEIGHT,
509 du->hotspot_x + du->core_hotspot_x,
510 du->hotspot_y + du->core_hotspot_y);
513 mutex_unlock(&dev->mode_config.mutex);
517 void vmw_du_cursor_plane_destroy(struct drm_plane *plane)
519 struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
522 vmw_cursor_update_position(vmw_priv(plane->dev), false, 0, 0);
524 for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++)
525 vmw_du_destroy_cursor_mob(&vcp->cursor_mobs[i]);
527 drm_plane_cleanup(plane);
531 void vmw_du_primary_plane_destroy(struct drm_plane *plane)
533 drm_plane_cleanup(plane);
535 /* Planes are static in our case so we don't free them */
540 * vmw_du_plane_unpin_surf - unpins resource associated with a framebuffer surface
542 * @vps: plane state associated with the display surface
544 void vmw_du_plane_unpin_surf(struct vmw_plane_state *vps)
546 struct vmw_surface *surf = vmw_user_object_surface(&vps->uo);
550 vmw_resource_unpin(&surf->res);
558 * vmw_du_plane_cleanup_fb - Unpins the plane surface
560 * @plane: display plane
561 * @old_state: Contains the FB to clean up
563 * Unpins the framebuffer surface
565 * Returns 0 on success
568 vmw_du_plane_cleanup_fb(struct drm_plane *plane,
569 struct drm_plane_state *old_state)
571 struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);
573 vmw_du_plane_unpin_surf(vps);
578 * vmw_du_cursor_plane_map_cm - Maps the cursor mobs.
582 * Returns 0 on success
586 vmw_du_cursor_plane_map_cm(struct vmw_plane_state *vps)
589 u32 size = vmw_du_cursor_mob_size(vps->base.crtc_w, vps->base.crtc_h);
590 struct ttm_buffer_object *bo;
595 bo = &vps->cursor.bo->tbo;
597 if (bo->base.size < size)
600 if (vps->cursor.bo->map.virtual)
603 ret = ttm_bo_reserve(bo, false, false, NULL);
604 if (unlikely(ret != 0))
607 vmw_bo_map_and_cache(vps->cursor.bo);
609 ttm_bo_unreserve(bo);
611 if (unlikely(ret != 0))
619 * vmw_du_cursor_plane_unmap_cm - Unmaps the cursor mobs.
621 * @vps: state of the cursor plane
623 * Returns 0 on success
627 vmw_du_cursor_plane_unmap_cm(struct vmw_plane_state *vps)
630 struct vmw_bo *vbo = vps->cursor.bo;
632 if (!vbo || !vbo->map.virtual)
635 ret = ttm_bo_reserve(&vbo->tbo, true, false, NULL);
636 if (likely(ret == 0)) {
638 ttm_bo_unreserve(&vbo->tbo);
646 * vmw_du_cursor_plane_cleanup_fb - Unpins the plane surface
648 * @plane: cursor plane
649 * @old_state: contains the state to clean up
651 * Unmaps all cursor bo mappings and unpins the cursor surface
653 * Returns 0 on success
656 vmw_du_cursor_plane_cleanup_fb(struct drm_plane *plane,
657 struct drm_plane_state *old_state)
659 struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
660 struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);
662 if (!vmw_user_object_is_null(&vps->uo))
663 vmw_user_object_unmap(&vps->uo);
665 vmw_du_cursor_plane_unmap_cm(vps);
666 vmw_du_put_cursor_mob(vcp, vps);
668 vmw_du_plane_unpin_surf(vps);
669 vmw_user_object_unref(&vps->uo);
674 * vmw_du_cursor_plane_prepare_fb - Readies the cursor by referencing it
676 * @plane: display plane
677 * @new_state: info on the new plane state, including the FB
679 * Returns 0 on success
682 vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane,
683 struct drm_plane_state *new_state)
685 struct drm_framebuffer *fb = new_state->fb;
686 struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
687 struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
688 struct vmw_bo *bo = NULL;
691 if (!vmw_user_object_is_null(&vps->uo)) {
692 vmw_user_object_unmap(&vps->uo);
693 vmw_user_object_unref(&vps->uo);
697 if (vmw_framebuffer_to_vfb(fb)->bo) {
698 vps->uo.buffer = vmw_framebuffer_to_vfbd(fb)->buffer;
699 vps->uo.surface = NULL;
701 memcpy(&vps->uo, &vmw_framebuffer_to_vfbs(fb)->uo, sizeof(vps->uo));
703 vmw_user_object_ref(&vps->uo);
706 bo = vmw_user_object_buffer(&vps->uo);
708 struct ttm_operation_ctx ctx = {false, false};
710 ret = ttm_bo_reserve(&bo->tbo, true, false, NULL);
714 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
718 vmw_bo_pin_reserved(bo, true);
719 if (vmw_framebuffer_to_vfb(fb)->bo) {
720 const u32 size = new_state->crtc_w * new_state->crtc_h * sizeof(u32);
722 (void)vmw_bo_map_and_cache_size(bo, size);
724 vmw_bo_map_and_cache(bo);
726 ttm_bo_unreserve(&bo->tbo);
729 if (!vmw_user_object_is_null(&vps->uo)) {
730 vmw_du_get_cursor_mob(vcp, vps);
731 vmw_du_cursor_plane_map_cm(vps);
739 vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
740 struct drm_atomic_state *state)
742 struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
744 struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
746 struct drm_crtc *crtc = new_state->crtc ?: old_state->crtc;
747 struct vmw_private *dev_priv = vmw_priv(crtc->dev);
748 struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
749 struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
750 struct vmw_plane_state *old_vps = vmw_plane_state_to_vps(old_state);
751 struct vmw_bo *old_bo = NULL;
752 struct vmw_bo *new_bo = NULL;
753 s32 hotspot_x, hotspot_y;
756 hotspot_x = du->hotspot_x + new_state->hotspot_x;
757 hotspot_y = du->hotspot_y + new_state->hotspot_y;
759 du->cursor_surface = vmw_user_object_surface(&vps->uo);
761 if (vmw_user_object_is_null(&vps->uo)) {
762 vmw_cursor_update_position(dev_priv, false, 0, 0);
766 vps->cursor.hotspot_x = hotspot_x;
767 vps->cursor.hotspot_y = hotspot_y;
769 if (du->cursor_surface)
770 du->cursor_age = du->cursor_surface->snooper.age;
772 if (!vmw_user_object_is_null(&old_vps->uo)) {
773 old_bo = vmw_user_object_buffer(&old_vps->uo);
774 ret = ttm_bo_reserve(&old_bo->tbo, false, false, NULL);
779 if (!vmw_user_object_is_null(&vps->uo)) {
780 new_bo = vmw_user_object_buffer(&vps->uo);
781 if (old_bo != new_bo) {
782 ret = ttm_bo_reserve(&new_bo->tbo, false, false, NULL);
789 if (!vmw_du_cursor_plane_has_changed(old_vps, vps)) {
791 * If it hasn't changed, avoid making the device do extra
792 * work by keeping the old cursor active.
794 struct vmw_cursor_plane_state tmp = old_vps->cursor;
795 old_vps->cursor = vps->cursor;
798 void *image = vmw_du_cursor_plane_acquire_image(vps);
800 vmw_cursor_update_image(dev_priv, vps, image,
803 hotspot_x, hotspot_y);
807 ttm_bo_unreserve(&old_bo->tbo);
809 ttm_bo_unreserve(&new_bo->tbo);
811 du->cursor_x = new_state->crtc_x + du->set_gui_x;
812 du->cursor_y = new_state->crtc_y + du->set_gui_y;
814 vmw_cursor_update_position(dev_priv, true,
815 du->cursor_x + hotspot_x,
816 du->cursor_y + hotspot_y);
818 du->core_hotspot_x = hotspot_x - du->hotspot_x;
819 du->core_hotspot_y = hotspot_y - du->hotspot_y;
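/*
 * Worked example with hypothetical values, assuming the device anchors
 * the image at the hotspot: a cursor plane at crtc (100, 50) with gui
 * offset (0, 0) and hotspot (8, 8) yields cursor_x/y = (100, 50) and a
 * device position of (108, 58), so the cursor's top-left corner lands
 * at (100, 50), matching the KMS plane position.
 */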
824 * vmw_du_primary_plane_atomic_check - check if the new state is okay
826 * @plane: display plane
827 * @state: info on the new plane state, including the FB
829 * Check if the new state is settable given the current state. Other
830 * than what the atomic helper checks, we care about the crtc fitting
831 * the FB and maintaining one active framebuffer.
833 * Returns 0 on success
835 int vmw_du_primary_plane_atomic_check(struct drm_plane *plane,
836 struct drm_atomic_state *state)
838 struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
840 struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
842 struct drm_crtc_state *crtc_state = NULL;
843 struct drm_framebuffer *new_fb = new_state->fb;
844 struct drm_framebuffer *old_fb = old_state->fb;
848 * Ignore damage clips if the framebuffer attached to the plane's state
849 * has changed since the last plane update (page-flip). In this case, a
850 * full plane update should happen because uploads are done per-buffer.
852 if (old_fb != new_fb)
853 new_state->ignore_damage_clips = true;
856 crtc_state = drm_atomic_get_new_crtc_state(state,
859 ret = drm_atomic_helper_check_plane_state(new_state, crtc_state,
860 DRM_PLANE_NO_SCALING,
861 DRM_PLANE_NO_SCALING,
868 * vmw_du_cursor_plane_atomic_check - check if the new state is okay
870 * @plane: cursor plane
871 * @state: info on the new plane state
873 * This is a chance to fail if the new cursor state does not fit our requirements.
876 * Returns 0 on success
878 int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane,
879 struct drm_atomic_state *state)
881 struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
884 struct drm_crtc_state *crtc_state = NULL;
885 struct vmw_surface *surface = NULL;
886 struct drm_framebuffer *fb = new_state->fb;
889 crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
892 ret = drm_atomic_helper_check_plane_state(new_state, crtc_state,
893 DRM_PLANE_NO_SCALING,
894 DRM_PLANE_NO_SCALING,
903 /* A lot of the code assumes this */
904 if (new_state->crtc_w != 64 || new_state->crtc_h != 64) {
905 DRM_ERROR("Invalid cursor dimensions (%d, %d)\n",
906 new_state->crtc_w, new_state->crtc_h);
910 if (!vmw_framebuffer_to_vfb(fb)->bo) {
911 surface = vmw_user_object_surface(&vmw_framebuffer_to_vfbs(fb)->uo);
916 (!surface->snooper.image && !surface->res.guest_memory_bo)) {
917 DRM_ERROR("surface not suitable for cursor\n");
926 int vmw_du_crtc_atomic_check(struct drm_crtc *crtc,
927 struct drm_atomic_state *state)
929 struct vmw_private *vmw = vmw_priv(crtc->dev);
930 struct drm_crtc_state *new_state = drm_atomic_get_new_crtc_state(state,
932 struct vmw_display_unit *du = vmw_crtc_to_du(new_state->crtc);
933 int connector_mask = drm_connector_mask(&du->connector);
934 bool has_primary = new_state->plane_mask &
935 drm_plane_mask(crtc->primary);
938 * This is fine in general, but broken userspace might expect
939 * some actual rendering so give a clue as to why it's blank.
941 if (new_state->enable && !has_primary)
942 drm_dbg_driver(&vmw->drm,
943 "CRTC without a primary plane will be blank.\n");
946 if (new_state->connector_mask != connector_mask &&
947 new_state->connector_mask != 0) {
948 DRM_ERROR("Invalid connectors configuration\n");
953 * Our virtual device does not have a dot clock, so use the logical
954 * clock value as the dot clock.
956 if (new_state->mode.crtc_clock == 0)
957 new_state->adjusted_mode.crtc_clock = new_state->mode.clock;
963 void vmw_du_crtc_atomic_begin(struct drm_crtc *crtc,
964 struct drm_atomic_state *state)
966 vmw_vkms_crtc_atomic_begin(crtc, state);
970 * vmw_du_crtc_duplicate_state - duplicate crtc state
973 * Allocates and returns a copy of the crtc state (both common and
974 * vmw-specific) for the specified crtc.
976 * Returns: The newly allocated crtc state, or NULL on failure.
978 struct drm_crtc_state *
979 vmw_du_crtc_duplicate_state(struct drm_crtc *crtc)
981 struct drm_crtc_state *state;
982 struct vmw_crtc_state *vcs;
984 if (WARN_ON(!crtc->state))
987 vcs = kmemdup(crtc->state, sizeof(*vcs), GFP_KERNEL);
994 __drm_atomic_helper_crtc_duplicate_state(crtc, state);
1001 * vmw_du_crtc_reset - creates a blank vmw crtc state
1004 * Resets the atomic state for @crtc by freeing the state pointer (which
1005 * might be NULL, e.g. at driver load time) and allocating a new empty state
1008 void vmw_du_crtc_reset(struct drm_crtc *crtc)
1010 struct vmw_crtc_state *vcs;
1014 __drm_atomic_helper_crtc_destroy_state(crtc->state);
1016 kfree(vmw_crtc_state_to_vcs(crtc->state));
1019 vcs = kzalloc(sizeof(*vcs), GFP_KERNEL);
1022 DRM_ERROR("Cannot allocate vmw_crtc_state\n");
1026 __drm_atomic_helper_crtc_reset(crtc, &vcs->base);
1031 * vmw_du_crtc_destroy_state - destroy crtc state
1033 * @state: state object to destroy
1035 * Destroys the crtc state (both common and vmw-specific) for the
1039 vmw_du_crtc_destroy_state(struct drm_crtc *crtc,
1040 struct drm_crtc_state *state)
1042 drm_atomic_helper_crtc_destroy_state(crtc, state);
1047 * vmw_du_plane_duplicate_state - duplicate plane state
1050 * Allocates and returns a copy of the plane state (both common and
1051 * vmw-specific) for the specified plane.
1053 * Returns: The newly allocated plane state, or NULL on failure.
1055 struct drm_plane_state *
1056 vmw_du_plane_duplicate_state(struct drm_plane *plane)
1058 struct drm_plane_state *state;
1059 struct vmw_plane_state *vps;
1061 vps = kmemdup(plane->state, sizeof(*vps), GFP_KERNEL);
1069 memset(&vps->cursor, 0, sizeof(vps->cursor));
1071 /* Each ref-counted resource needs to be acquired again */
1072 vmw_user_object_ref(&vps->uo);
1075 __drm_atomic_helper_plane_duplicate_state(plane, state);
1082 * vmw_du_plane_reset - creates a blank vmw plane state
1085 * Resets the atomic state for @plane by freeing the state pointer (which might
1086 * be NULL, e.g. at driver load time) and allocating a new empty state object.
1088 void vmw_du_plane_reset(struct drm_plane *plane)
1090 struct vmw_plane_state *vps;
1093 vmw_du_plane_destroy_state(plane, plane->state);
1095 vps = kzalloc(sizeof(*vps), GFP_KERNEL);
1098 DRM_ERROR("Cannot allocate vmw_plane_state\n");
1102 __drm_atomic_helper_plane_reset(plane, &vps->base);
1107 * vmw_du_plane_destroy_state - destroy plane state
1109 * @state: state object to destroy
1111 * Destroys the plane state (both common and vmw-specific) for the
1115 vmw_du_plane_destroy_state(struct drm_plane *plane,
1116 struct drm_plane_state *state)
1118 struct vmw_plane_state *vps = vmw_plane_state_to_vps(state);
1120 /* Should have been freed by cleanup_fb */
1121 vmw_user_object_unref(&vps->uo);
1123 drm_atomic_helper_plane_destroy_state(plane, state);
1128 * vmw_du_connector_duplicate_state - duplicate connector state
1129 * @connector: DRM connector
1131 * Allocates and returns a copy of the connector state (both common and
1132 * vmw-specific) for the specified connector.
1134 * Returns: The newly allocated connector state, or NULL on failure.
1136 struct drm_connector_state *
1137 vmw_du_connector_duplicate_state(struct drm_connector *connector)
1139 struct drm_connector_state *state;
1140 struct vmw_connector_state *vcs;
1142 if (WARN_ON(!connector->state))
1145 vcs = kmemdup(connector->state, sizeof(*vcs), GFP_KERNEL);
1152 __drm_atomic_helper_connector_duplicate_state(connector, state);
1159 * vmw_du_connector_reset - creates a blank vmw connector state
1160 * @connector: DRM connector
1162 * Resets the atomic state for @connector by freeing the state pointer (which
1163 * might be NULL, e.g. at driver load time) and allocating a new empty state
1166 void vmw_du_connector_reset(struct drm_connector *connector)
1168 struct vmw_connector_state *vcs;
1171 if (connector->state) {
1172 __drm_atomic_helper_connector_destroy_state(connector->state);
1174 kfree(vmw_connector_state_to_vcs(connector->state));
1177 vcs = kzalloc(sizeof(*vcs), GFP_KERNEL);
1180 DRM_ERROR("Cannot allocate vmw_connector_state\n");
1184 __drm_atomic_helper_connector_reset(connector, &vcs->base);
1189 * vmw_du_connector_destroy_state - destroy connector state
1190 * @connector: DRM connector
1191 * @state: state object to destroy
1193 * Destroys the connector state (both common and vmw-specific) for the
1197 vmw_du_connector_destroy_state(struct drm_connector *connector,
1198 struct drm_connector_state *state)
1200 drm_atomic_helper_connector_destroy_state(connector, state);
1203 * Generic framebuffer code
1207 * Surface framebuffer code
1210 static void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
1212 struct vmw_framebuffer_surface *vfbs =
1213 vmw_framebuffer_to_vfbs(framebuffer);
1215 drm_framebuffer_cleanup(framebuffer);
1216 vmw_user_object_unref(&vfbs->uo);
1222 * vmw_kms_readback - Perform a readback from the screen system to
1223 * a buffer-object backed framebuffer.
1225 * @dev_priv: Pointer to the device private structure.
1226 * @file_priv: Pointer to a struct drm_file identifying the caller.
1227 * Must be set to NULL if @user_fence_rep is NULL.
1228 * @vfb: Pointer to the buffer-object backed framebuffer.
1229 * @user_fence_rep: User-space provided structure for fence information.
1230 * Must be set to non-NULL if @file_priv is non-NULL.
1231 * @vclips: Array of clip rects.
1232 * @num_clips: Number of clip rects in @vclips.
1234 * Returns 0 on success, negative error code on failure. -ERESTARTSYS if interrupted.
1237 int vmw_kms_readback(struct vmw_private *dev_priv,
1238 struct drm_file *file_priv,
1239 struct vmw_framebuffer *vfb,
1240 struct drm_vmw_fence_rep __user *user_fence_rep,
1241 struct drm_vmw_rect *vclips,
1244 switch (dev_priv->active_display_unit) {
1245 case vmw_du_screen_object:
1246 return vmw_kms_sou_readback(dev_priv, file_priv, vfb,
1247 user_fence_rep, vclips, num_clips,
1249 case vmw_du_screen_target:
1250 return vmw_kms_stdu_readback(dev_priv, file_priv, vfb,
1251 user_fence_rep, NULL, vclips, num_clips,
1255 "Readback called with invalid display system.\n");
1261 static int vmw_framebuffer_surface_create_handle(struct drm_framebuffer *fb,
1262 struct drm_file *file_priv,
1263 unsigned int *handle)
1265 struct vmw_framebuffer_surface *vfbs = vmw_framebuffer_to_vfbs(fb);
1266 struct vmw_bo *bo = vmw_user_object_buffer(&vfbs->uo);
1270 return drm_gem_handle_create(file_priv, &bo->tbo.base, handle);
1273 static const struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
1274 .create_handle = vmw_framebuffer_surface_create_handle,
1275 .destroy = vmw_framebuffer_surface_destroy,
1276 .dirty = drm_atomic_helper_dirtyfb,
1279 static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
1280 struct vmw_user_object *uo,
1281 struct vmw_framebuffer **out,
1282 const struct drm_mode_fb_cmd2
1286 struct drm_device *dev = &dev_priv->drm;
1287 struct vmw_framebuffer_surface *vfbs;
1288 struct vmw_surface *surface;
1291 /* 3D is only supported on HWv8 and newer hosts */
1292 if (dev_priv->active_display_unit == vmw_du_legacy)
1295 surface = vmw_user_object_surface(uo);
1301 if (!drm_any_plane_has_format(&dev_priv->drm,
1302 mode_cmd->pixel_format,
1303 mode_cmd->modifier[0])) {
1304 drm_dbg(&dev_priv->drm,
1305 "unsupported pixel format %p4cc / modifier 0x%llx\n",
1306 &mode_cmd->pixel_format, mode_cmd->modifier[0]);
1310 /* Surface must be marked as a scanout. */
1311 if (unlikely(!surface->metadata.scanout))
1314 if (unlikely(surface->metadata.mip_levels[0] != 1 ||
1315 surface->metadata.num_sizes != 1 ||
1316 surface->metadata.base_size.width < mode_cmd->width ||
1317 surface->metadata.base_size.height < mode_cmd->height ||
1318 surface->metadata.base_size.depth != 1)) {
1319 DRM_ERROR("Incompatible surface dimensions "
1320 "for requested mode.\n");
1324 vfbs = kzalloc(sizeof(*vfbs), GFP_KERNEL);
1330 drm_helper_mode_fill_fb_struct(dev, &vfbs->base.base, mode_cmd);
1331 memcpy(&vfbs->uo, uo, sizeof(vfbs->uo));
1332 vmw_user_object_ref(&vfbs->uo);
1336 ret = drm_framebuffer_init(dev, &vfbs->base.base,
1337 &vmw_framebuffer_surface_funcs);
1344 vmw_user_object_unref(&vfbs->uo);
1351 * Buffer-object framebuffer code
1354 static int vmw_framebuffer_bo_create_handle(struct drm_framebuffer *fb,
1355 struct drm_file *file_priv,
1356 unsigned int *handle)
1358 struct vmw_framebuffer_bo *vfbd =
1359 vmw_framebuffer_to_vfbd(fb);
1360 return drm_gem_handle_create(file_priv, &vfbd->buffer->tbo.base, handle);
1363 static void vmw_framebuffer_bo_destroy(struct drm_framebuffer *framebuffer)
1365 struct vmw_framebuffer_bo *vfbd =
1366 vmw_framebuffer_to_vfbd(framebuffer);
1368 drm_framebuffer_cleanup(framebuffer);
1369 vmw_bo_unreference(&vfbd->buffer);
1374 static const struct drm_framebuffer_funcs vmw_framebuffer_bo_funcs = {
1375 .create_handle = vmw_framebuffer_bo_create_handle,
1376 .destroy = vmw_framebuffer_bo_destroy,
1377 .dirty = drm_atomic_helper_dirtyfb,
1380 static int vmw_kms_new_framebuffer_bo(struct vmw_private *dev_priv,
1382 struct vmw_framebuffer **out,
1383 const struct drm_mode_fb_cmd2
1387 struct drm_device *dev = &dev_priv->drm;
1388 struct vmw_framebuffer_bo *vfbd;
1389 unsigned int requested_size;
1392 requested_size = mode_cmd->height * mode_cmd->pitches[0];
1393 if (unlikely(requested_size > bo->tbo.base.size)) {
1394 DRM_ERROR("Screen buffer object size is too small "
1395 "for requested mode.\n");
1399 if (!drm_any_plane_has_format(&dev_priv->drm,
1400 mode_cmd->pixel_format,
1401 mode_cmd->modifier[0])) {
1402 drm_dbg(&dev_priv->drm,
1403 "unsupported pixel format %p4cc / modifier 0x%llx\n",
1404 &mode_cmd->pixel_format, mode_cmd->modifier[0]);
1408 vfbd = kzalloc(sizeof(*vfbd), GFP_KERNEL);
1414 vfbd->base.base.obj[0] = &bo->tbo.base;
1415 drm_helper_mode_fill_fb_struct(dev, &vfbd->base.base, mode_cmd);
1416 vfbd->base.bo = true;
1417 vfbd->buffer = vmw_bo_reference(bo);
1420 ret = drm_framebuffer_init(dev, &vfbd->base.base,
1421 &vmw_framebuffer_bo_funcs);
1428 vmw_bo_unreference(&bo);
1436 * vmw_kms_srf_ok - check if a surface can be created
1438 * @dev_priv: Pointer to device private struct.
1439 * @width: requested width
1440 * @height: requested height
1442 * Surfaces need to be smaller than the maximum texture size
1445 vmw_kms_srf_ok(struct vmw_private *dev_priv, uint32_t width, uint32_t height)
1447 if (width > dev_priv->texture_max_width ||
1448 height > dev_priv->texture_max_height)
1455 * vmw_kms_new_framebuffer - Create a new framebuffer.
1457 * @dev_priv: Pointer to device private struct.
1458 * @uo: Pointer to user object to wrap the kms framebuffer around.
1459 * Either the buffer or surface inside the user object must be NULL.
1460 * @mode_cmd: Frame-buffer metadata.
1462 struct vmw_framebuffer *
1463 vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
1464 struct vmw_user_object *uo,
1465 const struct drm_mode_fb_cmd2 *mode_cmd)
1467 struct vmw_framebuffer *vfb = NULL;
1470 /* Create the new framebuffer depending on what we have */
1471 if (vmw_user_object_surface(uo)) {
1472 ret = vmw_kms_new_framebuffer_surface(dev_priv, uo, &vfb,
1474 } else if (uo->buffer) {
1475 ret = vmw_kms_new_framebuffer_bo(dev_priv, uo->buffer, &vfb,
1482 return ERR_PTR(ret);
1488 * Generic Kernel modesetting functions
1491 static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
1492 struct drm_file *file_priv,
1493 const struct drm_mode_fb_cmd2 *mode_cmd)
1495 struct vmw_private *dev_priv = vmw_priv(dev);
1496 struct vmw_framebuffer *vfb = NULL;
1497 struct vmw_user_object uo = {0};
1500 /* returns either a bo or surface */
1501 ret = vmw_user_object_lookup(dev_priv, file_priv, mode_cmd->handles[0],
1504 DRM_ERROR("Invalid buffer object handle %u (0x%x).\n",
1505 mode_cmd->handles[0], mode_cmd->handles[0]);
1510 if (vmw_user_object_surface(&uo) &&
1511 !vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height)) {
1512 DRM_ERROR("Surface size cannot exceed %dx%d\n",
1513 dev_priv->texture_max_width,
1514 dev_priv->texture_max_height);
1520 vfb = vmw_kms_new_framebuffer(dev_priv, &uo, mode_cmd);
1527 /* vmw_user_object_lookup takes one ref; so does new_fb */
1528 vmw_user_object_unref(&uo);
1531 DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
1532 return ERR_PTR(ret);
1539 * vmw_kms_check_display_memory - Validates display memory required for a topology
1542 * @num_rects: number of drm_rect in rects
1543 * @rects: array of drm_rect representing the topology to validate indexed by
1547 * 0 on success otherwise negative error code
1549 static int vmw_kms_check_display_memory(struct drm_device *dev,
1551 struct drm_rect *rects)
1553 struct vmw_private *dev_priv = vmw_priv(dev);
1554 struct drm_rect bounding_box = {0};
1555 u64 total_pixels = 0, pixel_mem, bb_mem;
1558 for (i = 0; i < num_rects; i++) {
1560 * For STDU only individual screen (screen target) is limited by
1561 * SCREENTARGET_MAX_WIDTH/HEIGHT registers.
1563 if (dev_priv->active_display_unit == vmw_du_screen_target &&
1564 (drm_rect_width(&rects[i]) > dev_priv->stdu_max_width ||
1565 drm_rect_height(&rects[i]) > dev_priv->stdu_max_height)) {
1566 VMW_DEBUG_KMS("Screen size not supported.\n");
1570 /* Bounding box upper left is at (0,0). */
1571 if (rects[i].x2 > bounding_box.x2)
1572 bounding_box.x2 = rects[i].x2;
1574 if (rects[i].y2 > bounding_box.y2)
1575 bounding_box.y2 = rects[i].y2;
1577 total_pixels += (u64) drm_rect_width(&rects[i]) *
1578 (u64) drm_rect_height(&rects[i]);
1581 /* Virtual svga device primary limits are always in 32-bpp. */
1582 pixel_mem = total_pixels * 4;
1585 * For HV10 and below, prim_bb_mem is the vram size. When
1586 * SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM is not present, the vram size
1587 * is the limit on the primary bounding box.
1589 if (pixel_mem > dev_priv->max_primary_mem) {
1590 VMW_DEBUG_KMS("Combined output size too large.\n");
1594 /* SVGA_CAP_NO_BB_RESTRICTION is available for STDU only. */
1595 if (dev_priv->active_display_unit != vmw_du_screen_target ||
1596 !(dev_priv->capabilities & SVGA_CAP_NO_BB_RESTRICTION)) {
1597 bb_mem = (u64) bounding_box.x2 * bounding_box.y2 * 4;
1599 if (bb_mem > dev_priv->max_primary_mem) {
1600 VMW_DEBUG_KMS("Topology is beyond supported limits.\n");
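/*
 * Memory math, for illustration: two hypothetical 1920x1080 outputs
 * side by side give total_pixels = 2 * 1920 * 1080 and pixel_mem =
 * ~16.6 MB at 4 bytes per pixel. The bounding box spans x2 = 3840,
 * y2 = 1080, so bb_mem is the same here, but it grows if the outputs
 * leave gaps in the topology.
 */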
1609 * vmw_crtc_state_and_lock - Return new or current crtc state with locked crtc mutex
1611 * @state: The atomic state pointer containing the new atomic state
1614 * This function returns the new crtc state if it's part of the state update.
1615 * Otherwise returns the current crtc state. It also makes sure that the
1616 * crtc mutex is locked.
1618 * Returns: A valid crtc state pointer or NULL. It may also return a
1619 * pointer error, in particular -EDEADLK if locking needs to be rerun.
1621 static struct drm_crtc_state *
1622 vmw_crtc_state_and_lock(struct drm_atomic_state *state, struct drm_crtc *crtc)
1624 struct drm_crtc_state *crtc_state;
1626 crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
1628 lockdep_assert_held(&crtc->mutex.mutex.base);
1630 int ret = drm_modeset_lock(&crtc->mutex, state->acquire_ctx);
1632 if (ret != 0 && ret != -EALREADY)
1633 return ERR_PTR(ret);
1635 crtc_state = crtc->state;
1642 * vmw_kms_check_implicit - Verify that all implicit display units scan out
1643 * from the same fb after the new state is committed.
1644 * @dev: The drm_device.
1645 * @state: The new state to be checked.
1649 * -EINVAL on invalid state,
1650 * -EDEADLK if modeset locking needs to be rerun.
1652 static int vmw_kms_check_implicit(struct drm_device *dev,
1653 struct drm_atomic_state *state)
1655 struct drm_framebuffer *implicit_fb = NULL;
1656 struct drm_crtc *crtc;
1657 struct drm_crtc_state *crtc_state;
1658 struct drm_plane_state *plane_state;
1660 drm_for_each_crtc(crtc, dev) {
1661 struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
1663 if (!du->is_implicit)
1666 crtc_state = vmw_crtc_state_and_lock(state, crtc);
1667 if (IS_ERR(crtc_state))
1668 return PTR_ERR(crtc_state);
1670 if (!crtc_state || !crtc_state->enable)
1674 * Can't move primary planes across crtcs, so this is OK.
1675 * It also means we don't need to take the plane mutex.
1677 plane_state = du->primary.state;
1678 if (plane_state->crtc != crtc)
1682 implicit_fb = plane_state->fb;
1683 else if (implicit_fb != plane_state->fb)
1691 * vmw_kms_check_topology - Validates topology in drm_atomic_state
1693 * @state: the driver state object
1696 * 0 on success otherwise negative error code
1698 static int vmw_kms_check_topology(struct drm_device *dev,
1699 struct drm_atomic_state *state)
1701 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
1702 struct drm_rect *rects;
1703 struct drm_crtc *crtc;
1707 rects = kcalloc(dev->mode_config.num_crtc, sizeof(struct drm_rect),
1712 drm_for_each_crtc(crtc, dev) {
1713 struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
1714 struct drm_crtc_state *crtc_state;
1716 i = drm_crtc_index(crtc);
1718 crtc_state = vmw_crtc_state_and_lock(state, crtc);
1719 if (IS_ERR(crtc_state)) {
1720 ret = PTR_ERR(crtc_state);
1727 if (crtc_state->enable) {
1728 rects[i].x1 = du->gui_x;
1729 rects[i].y1 = du->gui_y;
1730 rects[i].x2 = du->gui_x + crtc_state->mode.hdisplay;
1731 rects[i].y2 = du->gui_y + crtc_state->mode.vdisplay;
1740 /* Determine change to topology due to new atomic state */
1741 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
1742 new_crtc_state, i) {
1743 struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
1744 struct drm_connector *connector;
1745 struct drm_connector_state *conn_state;
1746 struct vmw_connector_state *vmw_conn_state;
1748 if (!du->pref_active && new_crtc_state->enable) {
1749 VMW_DEBUG_KMS("Enabling a disabled display unit\n");
1755 * For vmwgfx each crtc has only one connector attached and it
1756 * is not changed, so we don't really need to check
1757 * crtc->connector_mask and iterate over it.
1759 connector = &du->connector;
1760 conn_state = drm_atomic_get_connector_state(state, connector);
1761 if (IS_ERR(conn_state)) {
1762 ret = PTR_ERR(conn_state);
1766 vmw_conn_state = vmw_connector_state_to_vcs(conn_state);
1767 vmw_conn_state->gui_x = du->gui_x;
1768 vmw_conn_state->gui_y = du->gui_y;
1771 ret = vmw_kms_check_display_memory(dev, dev->mode_config.num_crtc,
1780 * vmw_kms_atomic_check_modeset - validate state object for modeset changes
1783 * @state: the driver state object
1785 * This is a simple wrapper around drm_atomic_helper_check_modeset() for
1786 * us to assign a value to mode->crtc_clock so that
1787 * drm_calc_timestamping_constants() won't throw an error message
1790 * Zero for success or -errno
1793 vmw_kms_atomic_check_modeset(struct drm_device *dev,
1794 struct drm_atomic_state *state)
1796 struct drm_crtc *crtc;
1797 struct drm_crtc_state *crtc_state;
1798 bool need_modeset = false;
1801 ret = drm_atomic_helper_check(dev, state);
1805 ret = vmw_kms_check_implicit(dev, state);
1807 VMW_DEBUG_KMS("Invalid implicit state\n");
1811 for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
1812 if (drm_atomic_crtc_needs_modeset(crtc_state))
1813 need_modeset = true;
1817 return vmw_kms_check_topology(dev, state);
1822 static const struct drm_mode_config_funcs vmw_kms_funcs = {
1823 .fb_create = vmw_kms_fb_create,
1824 .atomic_check = vmw_kms_atomic_check_modeset,
1825 .atomic_commit = drm_atomic_helper_commit,
1828 static int vmw_kms_generic_present(struct vmw_private *dev_priv,
1829 struct drm_file *file_priv,
1830 struct vmw_framebuffer *vfb,
1831 struct vmw_surface *surface,
1833 int32_t destX, int32_t destY,
1834 struct drm_vmw_rect *clips,
1837 return vmw_kms_sou_do_surface_dirty(dev_priv, vfb, NULL, clips,
1838 &surface->res, destX, destY,
1839 num_clips, 1, NULL, NULL);
1843 int vmw_kms_present(struct vmw_private *dev_priv,
1844 struct drm_file *file_priv,
1845 struct vmw_framebuffer *vfb,
1846 struct vmw_surface *surface,
1848 int32_t destX, int32_t destY,
1849 struct drm_vmw_rect *clips,
1854 switch (dev_priv->active_display_unit) {
1855 case vmw_du_screen_target:
1856 ret = vmw_kms_stdu_surface_dirty(dev_priv, vfb, NULL, clips,
1857 &surface->res, destX, destY,
1858 num_clips, 1, NULL, NULL);
1860 case vmw_du_screen_object:
1861 ret = vmw_kms_generic_present(dev_priv, file_priv, vfb, surface,
1862 sid, destX, destY, clips,
1867 "Present called with invalid display system.\n");
1874 vmw_cmd_flush(dev_priv, false);
1880 vmw_kms_create_hotplug_mode_update_property(struct vmw_private *dev_priv)
1882 if (dev_priv->hotplug_mode_update_property)
1885 dev_priv->hotplug_mode_update_property =
1886 drm_property_create_range(&dev_priv->drm,
1887 DRM_MODE_PROP_IMMUTABLE,
1888 "hotplug_mode_update", 0, 1);
1892 vmw_atomic_commit_tail(struct drm_atomic_state *old_state)
1894 struct vmw_private *vmw = vmw_priv(old_state->dev);
1895 struct drm_crtc *crtc;
1896 struct drm_crtc_state *old_crtc_state;
1899 drm_atomic_helper_commit_tail(old_state);
1901 if (vmw->vkms_enabled) {
1902 for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) {
1903 struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
1904 (void)old_crtc_state; /* unused, required by the iterator */
1905 flush_work(&du->vkms.crc_generator_work);
1910 static const struct drm_mode_config_helper_funcs vmw_mode_config_helpers = {
1911 .atomic_commit_tail = vmw_atomic_commit_tail,
1914 int vmw_kms_init(struct vmw_private *dev_priv)
1916 struct drm_device *dev = &dev_priv->drm;
1918 static const char *display_unit_names[] = {
1926 drm_mode_config_init(dev);
1927 dev->mode_config.funcs = &vmw_kms_funcs;
1928 dev->mode_config.min_width = 1;
1929 dev->mode_config.min_height = 1;
1930 dev->mode_config.max_width = dev_priv->texture_max_width;
1931 dev->mode_config.max_height = dev_priv->texture_max_height;
1932 dev->mode_config.preferred_depth = dev_priv->assume_16bpp ? 16 : 32;
1933 dev->mode_config.helper_private = &vmw_mode_config_helpers;
1935 drm_mode_create_suggested_offset_properties(dev);
1936 vmw_kms_create_hotplug_mode_update_property(dev_priv);
1938 ret = vmw_kms_stdu_init_display(dev_priv);
1940 ret = vmw_kms_sou_init_display(dev_priv);
1941 if (ret) /* Fallback */
1942 ret = vmw_kms_ldu_init_display(dev_priv);
1944 BUILD_BUG_ON(ARRAY_SIZE(display_unit_names) != (vmw_du_max + 1));
1945 drm_info(&dev_priv->drm, "%s display unit initialized\n",
1946 display_unit_names[dev_priv->active_display_unit]);
1951 int vmw_kms_close(struct vmw_private *dev_priv)
1956 * Docs say we should take the lock before calling this function
1957 * but since it destroys encoders, and our destructor calls
1958 * drm_encoder_cleanup, which takes the lock, we would deadlock.
1960 drm_mode_config_cleanup(&dev_priv->drm);
1961 if (dev_priv->active_display_unit == vmw_du_legacy)
1962 ret = vmw_kms_ldu_close_display(dev_priv);
1967 int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
1968 struct drm_file *file_priv)
1970 struct drm_vmw_cursor_bypass_arg *arg = data;
1971 struct vmw_display_unit *du;
1972 struct drm_crtc *crtc;
1975 mutex_lock(&dev->mode_config.mutex);
1976 if (arg->flags & DRM_VMW_CURSOR_BYPASS_ALL) {
1978 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
1979 du = vmw_crtc_to_du(crtc);
1980 du->hotspot_x = arg->xhot;
1981 du->hotspot_y = arg->yhot;
1984 mutex_unlock(&dev->mode_config.mutex);
1988 crtc = drm_crtc_find(dev, file_priv, arg->crtc_id);
1994 du = vmw_crtc_to_du(crtc);
1996 du->hotspot_x = arg->xhot;
1997 du->hotspot_y = arg->yhot;
2000 mutex_unlock(&dev->mode_config.mutex);
2005 int vmw_kms_write_svga(struct vmw_private *vmw_priv,
2006 unsigned width, unsigned height, unsigned pitch,
2007 unsigned bpp, unsigned depth)
2009 if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
2010 vmw_write(vmw_priv, SVGA_REG_PITCHLOCK, pitch);
2011 else if (vmw_fifo_have_pitchlock(vmw_priv))
2012 vmw_fifo_mem_write(vmw_priv, SVGA_FIFO_PITCHLOCK, pitch);
2013 vmw_write(vmw_priv, SVGA_REG_WIDTH, width);
2014 vmw_write(vmw_priv, SVGA_REG_HEIGHT, height);
2015 if ((vmw_priv->capabilities & SVGA_CAP_8BIT_EMULATION) != 0)
2016 vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, bpp);
2018 if (vmw_read(vmw_priv, SVGA_REG_DEPTH) != depth) {
2019 DRM_ERROR("Invalid depth %u for %u bpp, host expects %u\n",
2020 depth, bpp, vmw_read(vmw_priv, SVGA_REG_DEPTH));
2028 bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
2032 return (pitch * height) < (u64)dev_priv->vram_size;
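/*
 * Worked example with hypothetical numbers: a 1920x1080 mode at 32bpp
 * has pitch = 1920 * 4 = 7680 bytes, so pitch * height =
 * 7680 * 1080 = ~8.3 MB, which must be below vram_size for the mode
 * to validate.
 */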
2036 * vmw_du_update_layout - Update the display unit with topology from resolution
2037 * plugin and generate DRM uevent
2038 * @dev_priv: device private
2039 * @num_rects: number of drm_rect in rects
2040 * @rects: topology to update
2042 static int vmw_du_update_layout(struct vmw_private *dev_priv,
2043 unsigned int num_rects, struct drm_rect *rects)
2045 struct drm_device *dev = &dev_priv->drm;
2046 struct vmw_display_unit *du;
2047 struct drm_connector *con;
2048 struct drm_connector_list_iter conn_iter;
2049 struct drm_modeset_acquire_ctx ctx;
2050 struct drm_crtc *crtc;
2053 /* Currently gui_x/y are protected by the crtc mutex */
2054 mutex_lock(&dev->mode_config.mutex);
2055 drm_modeset_acquire_init(&ctx, 0);
2057 drm_for_each_crtc(crtc, dev) {
2058 ret = drm_modeset_lock(&crtc->mutex, &ctx);
2060 if (ret == -EDEADLK) {
2061 drm_modeset_backoff(&ctx);
2068 drm_connector_list_iter_begin(dev, &conn_iter);
2069 drm_for_each_connector_iter(con, &conn_iter) {
2070 du = vmw_connector_to_du(con);
2071 if (num_rects > du->unit) {
2072 du->pref_width = drm_rect_width(&rects[du->unit]);
2073 du->pref_height = drm_rect_height(&rects[du->unit]);
2074 du->pref_active = true;
2075 du->gui_x = rects[du->unit].x1;
2076 du->gui_y = rects[du->unit].y1;
2078 du->pref_width = VMWGFX_MIN_INITIAL_WIDTH;
2079 du->pref_height = VMWGFX_MIN_INITIAL_HEIGHT;
2080 du->pref_active = false;
2085 drm_connector_list_iter_end(&conn_iter);
2087 list_for_each_entry(con, &dev->mode_config.connector_list, head) {
2088 du = vmw_connector_to_du(con);
2089 if (num_rects > du->unit) {
2090 drm_object_property_set_value
2091 (&con->base, dev->mode_config.suggested_x_property,
2093 drm_object_property_set_value
2094 (&con->base, dev->mode_config.suggested_y_property,
2097 drm_object_property_set_value
2098 (&con->base, dev->mode_config.suggested_x_property,
2100 drm_object_property_set_value
2101 (&con->base, dev->mode_config.suggested_y_property,
2104 con->status = vmw_du_connector_detect(con, true);
2107 drm_modeset_drop_locks(&ctx);
2108 drm_modeset_acquire_fini(&ctx);
2109 mutex_unlock(&dev->mode_config.mutex);
2111 drm_sysfs_hotplug_event(dev);
2116 int vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
2117 u16 *r, u16 *g, u16 *b,
2119 struct drm_modeset_acquire_ctx *ctx)
2121 struct vmw_private *dev_priv = vmw_priv(crtc->dev);
2124 for (i = 0; i < size; i++) {
2125 DRM_DEBUG("%d r/g/b = 0x%04x / 0x%04x / 0x%04x\n", i,
2127 vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 0, r[i] >> 8);
2128 vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 1, g[i] >> 8);
2129 vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 2, b[i] >> 8);
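/*
 * Register layout, for illustration: palette entry i occupies the three
 * consecutive registers SVGA_PALETTE_BASE + 3 * i + {0, 1, 2} for
 * r/g/b, and the 16-bit DRM gamma values are narrowed to 8 bits by the
 * >> 8 above.
 */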
2135 int vmw_du_connector_dpms(struct drm_connector *connector, int mode)
2140 enum drm_connector_status
2141 vmw_du_connector_detect(struct drm_connector *connector, bool force)
2143 uint32_t num_displays;
2144 struct drm_device *dev = connector->dev;
2145 struct vmw_private *dev_priv = vmw_priv(dev);
2146 struct vmw_display_unit *du = vmw_connector_to_du(connector);
2148 num_displays = vmw_read(dev_priv, SVGA_REG_NUM_DISPLAYS);
2150 return ((vmw_connector_to_du(connector)->unit < num_displays &&
2152 connector_status_connected : connector_status_disconnected);
2156 * vmw_guess_mode_timing - Provide fake timings for a
2157 * 60Hz vrefresh mode.
2159 * @mode: Pointer to a struct drm_display_mode with hdisplay and vdisplay
2160 * members filled in.
2162 void vmw_guess_mode_timing(struct drm_display_mode *mode)
2164 mode->hsync_start = mode->hdisplay + 50;
2165 mode->hsync_end = mode->hsync_start + 50;
2166 mode->htotal = mode->hsync_end + 50;
2168 mode->vsync_start = mode->vdisplay + 50;
2169 mode->vsync_end = mode->vsync_start + 50;
2170 mode->vtotal = mode->vsync_end + 50;
2172 mode->clock = (u32)mode->htotal * (u32)mode->vtotal / 100 * 6;
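/*
 * Worked example (hypothetical 1024x768 mode): the paddings above give
 * htotal = 1174 and vtotal = 918, so clock = 1174 * 918 / 100 * 6
 * = 64662 kHz, and 64662000 / (1174 * 918) ~= 60 Hz.
 */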
2177 * vmw_kms_update_layout_ioctl - Handler for DRM_VMW_UPDATE_LAYOUT ioctl
2178 * @dev: drm device for the ioctl
2179 * @data: data pointer for the ioctl
2180 * @file_priv: drm file for the ioctl call
2182 * Update preferred topology of display unit as per ioctl request. The topology
2183 * is expressed as array of drm_vmw_rect.
2185 * [0 0 640 480] [640 0 800 600] [0 480 640 480]
2188 * The x and y offsets (upper left) in drm_vmw_rect cannot be less than 0.
2189 * Besides the device limit on topology, x + w and y + h (lower right)
2190 * cannot be greater than INT_MAX. Topologies beyond these limits return an error.
2193 * Zero on success, negative errno on failure.
2195 int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
2196 struct drm_file *file_priv)
2198 struct vmw_private *dev_priv = vmw_priv(dev);
2199 struct drm_mode_config *mode_config = &dev->mode_config;
2200 struct drm_vmw_update_layout_arg *arg =
2201 (struct drm_vmw_update_layout_arg *)data;
2202 const void __user *user_rects;
2203 struct drm_vmw_rect *rects;
2204 struct drm_rect *drm_rects;
2205 unsigned rects_size;
2208 if (!arg->num_outputs) {
2209 struct drm_rect def_rect = {0, 0,
2210 VMWGFX_MIN_INITIAL_WIDTH,
2211 VMWGFX_MIN_INITIAL_HEIGHT};
2212 vmw_du_update_layout(dev_priv, 1, &def_rect);
2214 } else if (arg->num_outputs > VMWGFX_NUM_DISPLAY_UNITS) {
2218 rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect);
2219 rects = kcalloc(arg->num_outputs, sizeof(struct drm_vmw_rect),
2221 if (unlikely(!rects))
2224 user_rects = (void __user *)(unsigned long)arg->rects;
2225 ret = copy_from_user(rects, user_rects, rects_size);
2226 if (unlikely(ret != 0)) {
2227 DRM_ERROR("Failed to get rects.\n");
2232 drm_rects = (struct drm_rect *)rects;
2234 VMW_DEBUG_KMS("Layout count = %u\n", arg->num_outputs);
2235 for (i = 0; i < arg->num_outputs; i++) {
2236 struct drm_vmw_rect curr_rect;
2238 /* Verify user-space input for overflow as the kernel uses drm_rect */
2239 if ((rects[i].x + rects[i].w > INT_MAX) ||
2240 (rects[i].y + rects[i].h > INT_MAX)) {
2245 curr_rect = rects[i];
2246 drm_rects[i].x1 = curr_rect.x;
2247 drm_rects[i].y1 = curr_rect.y;
2248 drm_rects[i].x2 = curr_rect.x + curr_rect.w;
2249 drm_rects[i].y2 = curr_rect.y + curr_rect.h;
2251 VMW_DEBUG_KMS(" x1 = %d y1 = %d x2 = %d y2 = %d\n",
2252 drm_rects[i].x1, drm_rects[i].y1,
2253 drm_rects[i].x2, drm_rects[i].y2);
2256 * Currently this check limits the topology to
2257 * mode_config->max (which actually is the max texture size
2258 * supported by the virtual device). This limit is here to address
2259 * window managers that create a big framebuffer for the whole topology.
2262 if (drm_rects[i].x1 < 0 || drm_rects[i].y1 < 0 ||
2263 drm_rects[i].x2 > mode_config->max_width ||
2264 drm_rects[i].y2 > mode_config->max_height) {
2265 VMW_DEBUG_KMS("Invalid layout %d %d %d %d\n",
2266 drm_rects[i].x1, drm_rects[i].y1,
2267 drm_rects[i].x2, drm_rects[i].y2);
2273 ret = vmw_kms_check_display_memory(dev, arg->num_outputs, drm_rects);
2276 vmw_du_update_layout(dev_priv, arg->num_outputs, drm_rects);
2284 * vmw_kms_helper_dirty - Helper to build commands and perform actions based
2285 * on a set of cliprects and a set of display units.
2287 * @dev_priv: Pointer to a device private structure.
2288 * @framebuffer: Pointer to the framebuffer on which to perform the actions.
2289 * @clips: A set of struct drm_clip_rect. Either this or @vclips must be NULL.
2290 * Cliprects are given in framebuffer coordinates.
2291 * @vclips: A set of struct drm_vmw_rect cliprects. Either this or @clips must
2292 * be NULL. Cliprects are given in source coordinates.
2293 * @dest_x: X coordinate offset for the crtc / destination clip rects.
2294 * @dest_y: Y coordinate offset for the crtc / destination clip rects.
2295 * @num_clips: Number of cliprects in the @clips or @vclips array.
2296 * @increment: Integer with which to increment the clip counter when looping.
2297 * Used to skip a predetermined number of clip rects.
2298 * @dirty: Closure structure. See the description of struct vmw_kms_dirty.
2300 int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
2301 struct vmw_framebuffer *framebuffer,
2302 const struct drm_clip_rect *clips,
2303 const struct drm_vmw_rect *vclips,
2304 s32 dest_x, s32 dest_y,
2307 struct vmw_kms_dirty *dirty)
2309 struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
2310 struct drm_crtc *crtc;
2314 dirty->dev_priv = dev_priv;
2316 /* If crtc is passed, no need to iterate over other display units */
2318 units[num_units++] = vmw_crtc_to_du(dirty->crtc);
2320 list_for_each_entry(crtc, &dev_priv->drm.mode_config.crtc_list,
2322 struct drm_plane *plane = crtc->primary;
2324 if (plane->state->fb == &framebuffer->base)
2325 units[num_units++] = vmw_crtc_to_du(crtc);
2329 for (k = 0; k < num_units; k++) {
2330 struct vmw_display_unit *unit = units[k];
2331 s32 crtc_x = unit->crtc.x;
2332 s32 crtc_y = unit->crtc.y;
2333 s32 crtc_width = unit->crtc.mode.hdisplay;
2334 s32 crtc_height = unit->crtc.mode.vdisplay;
2335 const struct drm_clip_rect *clips_ptr = clips;
2336 const struct drm_vmw_rect *vclips_ptr = vclips;
2339 if (dirty->fifo_reserve_size > 0) {
2340 dirty->cmd = VMW_CMD_RESERVE(dev_priv,
2341 dirty->fifo_reserve_size);
2345 memset(dirty->cmd, 0, dirty->fifo_reserve_size);
2347 dirty->num_hits = 0;
2348 for (i = 0; i < num_clips; i++, clips_ptr += increment,
2349 vclips_ptr += increment) {
2354 * Select clip array type. Note that integer type
2355 * in @clips is unsigned short, whereas in @vclips
2359 dirty->fb_x = (s32) clips_ptr->x1;
2360 dirty->fb_y = (s32) clips_ptr->y1;
2361 dirty->unit_x2 = (s32) clips_ptr->x2 + dest_x -
2363 dirty->unit_y2 = (s32) clips_ptr->y2 + dest_y -
2366 dirty->fb_x = vclips_ptr->x;
2367 dirty->fb_y = vclips_ptr->y;
2368 dirty->unit_x2 = dirty->fb_x + vclips_ptr->w +
2370 dirty->unit_y2 = dirty->fb_y + vclips_ptr->h +
2374 dirty->unit_x1 = dirty->fb_x + dest_x - crtc_x;
2375 dirty->unit_y1 = dirty->fb_y + dest_y - crtc_y;
2377 /* Skip this clip if it's outside the crtc region */
2378 if (dirty->unit_x1 >= crtc_width ||
2379 dirty->unit_y1 >= crtc_height ||
2380 dirty->unit_x2 <= 0 || dirty->unit_y2 <= 0)
2383 /* Clip right and bottom to crtc limits */
2384 dirty->unit_x2 = min_t(s32, dirty->unit_x2,
2386 dirty->unit_y2 = min_t(s32, dirty->unit_y2,
2389 /* Clip left and top to crtc limits */
2390 clip_left = min_t(s32, dirty->unit_x1, 0);
2391 clip_top = min_t(s32, dirty->unit_y1, 0);
2392 dirty->unit_x1 -= clip_left;
2393 dirty->unit_y1 -= clip_top;
2394 dirty->fb_x -= clip_left;
2395 dirty->fb_y -= clip_top;
2400 dirty->fifo_commit(dirty);
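/*
 * Illustrative only: the general shape of a &vmw_kms_dirty closure as
 * consumed by vmw_kms_helper_dirty() above. The real users live in the
 * screen-object and screen-target code; example_blit_cmd and both callback
 * names here are hypothetical placeholders.
 *
 *	static void example_clip(struct vmw_kms_dirty *dirty)
 *	{
 *		struct example_blit_cmd *cmd = dirty->cmd;
 *
 *		// Encode one blit from (dirty->fb_x, dirty->fb_y) to the
 *		// unit rect dirty->unit_x1/y1/x2/y2 at cmd[dirty->num_hits].
 *		dirty->num_hits++;
 *	}
 *
 *	static void example_fifo_commit(struct vmw_kms_dirty *dirty)
 *	{
 *		vmw_cmd_commit(dirty->dev_priv,
 *			       sizeof(struct example_blit_cmd) * dirty->num_hits);
 *	}
 *
 *	struct vmw_kms_dirty dirty = {
 *		.clip = example_clip,
 *		.fifo_commit = example_fifo_commit,
 *		.fifo_reserve_size = sizeof(struct example_blit_cmd) * num_clips,
 *	};
 *
 *	ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, NULL,
 *				   0, 0, num_clips, 1, &dirty);
 */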
/**
 * vmw_kms_helper_validation_finish - Helper for post KMS command submission
 * cleanup and fencing
 * @dev_priv: Pointer to the device-private struct
 * @file_priv: Pointer identifying the client when user-space fencing is used
 * @ctx: Pointer to the validation context
 * @out_fence: If non-NULL, returned refcounted fence-pointer
 * @user_fence_rep: If non-NULL, pointer to user-space address area
 * in which to copy user-space fence info
 */
void vmw_kms_helper_validation_finish(struct vmw_private *dev_priv,
				      struct drm_file *file_priv,
				      struct vmw_validation_context *ctx,
				      struct vmw_fence_obj **out_fence,
				      struct drm_vmw_fence_rep __user *
				      user_fence_rep)
{
	struct vmw_fence_obj *fence = NULL;
	uint32_t handle = 0;
	int ret = 0;

	if (file_priv || user_fence_rep || vmw_validation_has_bos(ctx) ||
	    out_fence)
		ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
						 file_priv ? &handle : NULL);
	vmw_validation_done(ctx, fence);
	if (file_priv)
		vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv),
					    ret, user_fence_rep, fence,
					    handle, -1);
	if (out_fence)
		*out_fence = fence;
	else
		vmw_fence_obj_unreference(&fence);
}
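/*
 * Illustrative only: for kernel-internal submissions there is no client to
 * copy fence info to, so callers pass NULL for the file and fence-rep
 * arguments and either collect the fence or let it be unreferenced:
 *
 *	vmw_kms_helper_validation_finish(dev_priv, NULL, &val_ctx,
 *					 &fence, NULL);
 */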
/**
 * vmw_kms_create_implicit_placement_property - Set up the implicit placement
 * property
 *
 * @dev_priv: Pointer to a device private struct.
 *
 * Sets up the implicit placement property unless it's already set up.
 */
void
vmw_kms_create_implicit_placement_property(struct vmw_private *dev_priv)
{
	if (dev_priv->implicit_placement_property)
		return;

	dev_priv->implicit_placement_property =
		drm_property_create_range(&dev_priv->drm,
					  DRM_MODE_PROP_IMMUTABLE,
					  "implicit_placement", 0, 1);
}
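/*
 * Illustrative only: a display unit that supports implicit placement would
 * create the property once and then attach it to its connector, roughly:
 *
 *	vmw_kms_create_implicit_placement_property(dev_priv);
 *	drm_object_attach_property(&connector->base,
 *				   dev_priv->implicit_placement_property, 1);
 */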
/**
 * vmw_kms_suspend - Save modesetting state and turn modesetting off.
 *
 * @dev: Pointer to the drm device
 * Return: 0 on success. Negative error code on failure.
 */
int vmw_kms_suspend(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);

	dev_priv->suspend_state = drm_atomic_helper_suspend(dev);
	if (IS_ERR(dev_priv->suspend_state)) {
		int ret = PTR_ERR(dev_priv->suspend_state);

		DRM_ERROR("Failed kms suspend: %d\n", ret);
		dev_priv->suspend_state = NULL;
		return ret;
	}

	return 0;
}
/**
 * vmw_kms_resume - Re-enable modesetting and restore state
 *
 * @dev: Pointer to the drm device
 * Return: 0 on success. Negative error code on failure.
 *
 * State is resumed from a previous vmw_kms_suspend(). It's illegal
 * to call this function without a previous vmw_kms_suspend().
 */
int vmw_kms_resume(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	int ret;

	if (WARN_ON(!dev_priv->suspend_state))
		return 0;

	ret = drm_atomic_helper_resume(dev, dev_priv->suspend_state);
	dev_priv->suspend_state = NULL;

	return ret;
}
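/*
 * Illustrative only: the pair above is intended to bracket a period during
 * which the device may lose its state, as in a simplified, hypothetical set
 * of power-management callbacks:
 *
 *	static int example_freeze(struct drm_device *dev)
 *	{
 *		return vmw_kms_suspend(dev);
 *	}
 *
 *	static int example_restore(struct drm_device *dev)
 *	{
 *		return vmw_kms_resume(dev);
 *	}
 */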
/**
 * vmw_kms_lost_device - Notify kms that modesetting capabilities will be lost
 *
 * @dev: Pointer to the drm device
 */
void vmw_kms_lost_device(struct drm_device *dev)
{
	drm_atomic_helper_shutdown(dev);
}
/**
 * vmw_du_helper_plane_update - Helper to do plane update on a display unit.
 * @update: The closure structure.
 *
 * Call this helper after setting callbacks in &vmw_du_update_plane to do plane
 * update on display unit.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int vmw_du_helper_plane_update(struct vmw_du_update_plane *update)
{
	struct drm_plane_state *state = update->plane->state;
	struct drm_plane_state *old_state = update->old_state;
	struct drm_atomic_helper_damage_iter iter;
	struct drm_rect clip;
	struct drm_rect bb;
	DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
	uint32_t reserved_size = 0;
	uint32_t submit_size = 0;
	uint32_t curr_size = 0;
	uint32_t num_hits = 0;
	void *cmd_start;
	char *cmd_next;
	int ret;

	/*
	 * Iterate in advance to check if really need plane update and find the
	 * number of clips that actually are in plane src for fifo allocation.
	 */
	drm_atomic_helper_damage_iter_init(&iter, old_state, state);
	drm_atomic_for_each_plane_damage(&iter, &clip)
		num_hits++;

	if (num_hits == 0)
		return 0;

	if (update->vfb->bo) {
		struct vmw_framebuffer_bo *vfbbo =
			container_of(update->vfb, typeof(*vfbbo), base);

		/*
		 * For screen targets we want a mappable bo, for everything
		 * else we want an accelerated, i.e. host-backed (vram or gmr),
		 * bo. If the display unit is not screen target then mobs
		 * shouldn't be available.
		 */
		if (update->dev_priv->active_display_unit == vmw_du_screen_target) {
			vmw_bo_placement_set(vfbbo->buffer,
					     VMW_BO_DOMAIN_SYS | VMW_BO_DOMAIN_MOB | VMW_BO_DOMAIN_GMR,
					     VMW_BO_DOMAIN_SYS | VMW_BO_DOMAIN_MOB | VMW_BO_DOMAIN_GMR);
		} else {
			WARN_ON(update->dev_priv->has_mob);
			vmw_bo_placement_set_default_accelerated(vfbbo->buffer);
		}
		ret = vmw_validation_add_bo(&val_ctx, vfbbo->buffer);
	} else {
		struct vmw_framebuffer_surface *vfbs =
			container_of(update->vfb, typeof(*vfbs), base);
		struct vmw_surface *surf = vmw_user_object_surface(&vfbs->uo);

		ret = vmw_validation_add_resource(&val_ctx, &surf->res,
						  0, VMW_RES_DIRTY_NONE, NULL,
						  NULL);
	}

	if (ret)
		return ret;

	ret = vmw_validation_prepare(&val_ctx, update->mutex, update->intr);
	if (ret)
		goto out_unref;

	reserved_size = update->calc_fifo_size(update, num_hits);
	cmd_start = VMW_CMD_RESERVE(update->dev_priv, reserved_size);
	if (!cmd_start) {
		ret = -ENOMEM;
		goto out_revert;
	}

	cmd_next = cmd_start;

	if (update->post_prepare) {
		curr_size = update->post_prepare(update, cmd_next);
		cmd_next += curr_size;
		submit_size += curr_size;
	}

	if (update->pre_clip) {
		curr_size = update->pre_clip(update, cmd_next, num_hits);
		cmd_next += curr_size;
		submit_size += curr_size;
	}

	bb.x1 = INT_MAX;
	bb.y1 = INT_MAX;
	bb.x2 = INT_MIN;
	bb.y2 = INT_MIN;

	drm_atomic_helper_damage_iter_init(&iter, old_state, state);
	drm_atomic_for_each_plane_damage(&iter, &clip) {
		uint32_t fb_x = clip.x1;
		uint32_t fb_y = clip.y1;

		vmw_du_translate_to_crtc(state, &clip);
		if (update->clip) {
			curr_size = update->clip(update, cmd_next, &clip, fb_x,
						 fb_y);
			cmd_next += curr_size;
			submit_size += curr_size;
		}
		bb.x1 = min_t(int, bb.x1, clip.x1);
		bb.y1 = min_t(int, bb.y1, clip.y1);
		bb.x2 = max_t(int, bb.x2, clip.x2);
		bb.y2 = max_t(int, bb.y2, clip.y2);
	}

	curr_size = update->post_clip(update, cmd_next, &bb);
	submit_size += curr_size;

	if (reserved_size < submit_size)
		submit_size = 0;

	vmw_cmd_commit(update->dev_priv, submit_size);

	vmw_kms_helper_validation_finish(update->dev_priv, NULL, &val_ctx,
					 update->out_fence, NULL);
	return ret;

out_revert:
	vmw_validation_revert(&val_ctx);
out_unref:
	vmw_validation_unref_lists(&val_ctx);
	return ret;
}
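/*
 * Illustrative only: a display unit fills the &vmw_du_update_plane closure
 * with its FIFO-encoding callbacks before calling the helper. The example_*
 * callbacks are hypothetical stand-ins for the per-unit implementations:
 *
 *	struct vmw_du_update_plane update = {
 *		.plane = plane,
 *		.old_state = old_state,
 *		.dev_priv = dev_priv,
 *		.du = vmw_crtc_to_du(plane->state->crtc),
 *		.vfb = vmw_framebuffer_to_vfb(plane->state->fb),
 *		.calc_fifo_size = example_calc_fifo_size,
 *		.pre_clip = example_pre_clip,
 *		.clip = example_clip,
 *		.post_clip = example_post_clip,
 *		.out_fence = NULL,
 *		.mutex = NULL,
 *		.intr = true,
 *	};
 *
 *	ret = vmw_du_helper_plane_update(&update);
 */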
/**
 * vmw_connector_mode_valid - implements drm_connector_helper_funcs.mode_valid callback
 *
 * @connector: the drm connector, part of a DU container
 * @mode: drm mode to check
 *
 * Returns MODE_OK on success, or a drm_mode_status error code.
 */
enum drm_mode_status vmw_connector_mode_valid(struct drm_connector *connector,
					      struct drm_display_mode *mode)
{
	enum drm_mode_status ret;
	struct drm_device *dev = connector->dev;
	struct vmw_private *dev_priv = vmw_priv(dev);
	u32 assumed_cpp = 4;

	if (dev_priv->assume_16bpp)
		assumed_cpp = 2;

	ret = drm_mode_validate_size(mode, dev_priv->texture_max_width,
				     dev_priv->texture_max_height);
	if (ret != MODE_OK)
		return ret;

	if (!vmw_kms_validate_mode_vram(dev_priv,
					mode->hdisplay * assumed_cpp,
					mode->vdisplay))
		return MODE_MEM;

	return MODE_OK;
}
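/*
 * Illustrative only: with the default assumed_cpp of 4, a 1920x1080 mode is
 * checked as 1920 * 4 = 7680 bytes per scanline over 1080 lines, i.e.
 * roughly 7.9 MiB of VRAM; with assume_16bpp the estimate halves to roughly
 * 4 MiB.
 */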
/**
 * vmw_connector_get_modes - implements drm_connector_helper_funcs.get_modes callback
 *
 * @connector: the drm connector, part of a DU container
 *
 * Returns the number of added modes.
 */
int vmw_connector_get_modes(struct drm_connector *connector)
{
	struct vmw_display_unit *du = vmw_connector_to_du(connector);
	struct drm_device *dev = connector->dev;
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_display_mode *mode = NULL;
	struct drm_display_mode prefmode = { DRM_MODE("preferred",
		DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
	};
	u32 max_width;
	u32 max_height;
	u32 num_modes;

	/* Add preferred mode */
	mode = drm_mode_duplicate(dev, &prefmode);
	if (!mode)
		return 0;

	mode->hdisplay = du->pref_width;
	mode->vdisplay = du->pref_height;
	vmw_guess_mode_timing(mode);
	drm_mode_set_name(mode);

	drm_mode_probed_add(connector, mode);
	drm_dbg_kms(dev, "preferred mode " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));

	/* Probe connector for all modes not exceeding our geom limits */
	max_width = dev_priv->texture_max_width;
	max_height = dev_priv->texture_max_height;

	if (dev_priv->active_display_unit == vmw_du_screen_target) {
		max_width = min(dev_priv->stdu_max_width, max_width);
		max_height = min(dev_priv->stdu_max_height, max_height);
	}

	num_modes = 1 + drm_add_modes_noedid(connector, max_width, max_height);

	return num_modes;
}
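/*
 * Illustrative only: both helpers above are meant to be plugged into a
 * display unit's &drm_connector_helper_funcs during unit init, roughly:
 *
 *	static const struct drm_connector_helper_funcs example_connector_helper_funcs = {
 *		.get_modes = vmw_connector_get_modes,
 *		.mode_valid = vmw_connector_mode_valid,
 *	};
 *
 *	drm_connector_helper_add(connector, &example_connector_helper_funcs);
 */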
struct vmw_user_object *vmw_user_object_ref(struct vmw_user_object *uo)
{
	if (uo->buffer)
		vmw_user_bo_ref(uo->buffer);
	else if (uo->surface)
		vmw_surface_reference(uo->surface);
	return uo;
}

void vmw_user_object_unref(struct vmw_user_object *uo)
{
	if (uo->buffer)
		vmw_user_bo_unref(&uo->buffer);
	else if (uo->surface)
		vmw_surface_unreference(&uo->surface);
}

struct vmw_bo *
vmw_user_object_buffer(struct vmw_user_object *uo)
{
	if (uo->buffer)
		return uo->buffer;
	else if (uo->surface)
		return uo->surface->res.guest_memory_bo;
	return NULL;
}

struct vmw_surface *
vmw_user_object_surface(struct vmw_user_object *uo)
{
	if (uo->buffer)
		return uo->buffer->dumb_surface;
	return uo->surface;
}

void *vmw_user_object_map(struct vmw_user_object *uo)
{
	struct vmw_bo *bo = vmw_user_object_buffer(uo);

	WARN_ON(!bo);
	return vmw_bo_map_and_cache(bo);
}

void *vmw_user_object_map_size(struct vmw_user_object *uo, size_t size)
{
	struct vmw_bo *bo = vmw_user_object_buffer(uo);

	WARN_ON(!bo);
	return vmw_bo_map_and_cache_size(bo, size);
}

void vmw_user_object_unmap(struct vmw_user_object *uo)
{
	struct vmw_bo *bo = vmw_user_object_buffer(uo);
	int ret;

	WARN_ON(!bo);

	/* Fence the mob creation so we are guaranteed to have the mob */
	ret = ttm_bo_reserve(&bo->tbo, false, false, NULL);
	if (ret != 0)
		return;

	vmw_bo_unmap(bo);
	vmw_bo_pin_reserved(bo, false);

	ttm_bo_unreserve(&bo->tbo);
}

bool vmw_user_object_is_mapped(struct vmw_user_object *uo)
{
	struct vmw_bo *bo;

	if (!uo || vmw_user_object_is_null(uo))
		return false;

	bo = vmw_user_object_buffer(uo);

	if (!bo)
		return false;

	WARN_ON(bo->map.bo && !bo->map.virtual);
	return bo->map.virtual;
}

bool vmw_user_object_is_null(struct vmw_user_object *uo)
{
	return !uo->buffer && !uo->surface;
}
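/*
 * Illustrative only: a typical map/use/unmap sequence over a user object,
 * valid whether the object is backed by a buffer or by a surface:
 *
 *	if (!vmw_user_object_is_null(uo)) {
 *		void *ptr = vmw_user_object_map(uo);
 *
 *		if (ptr) {
 *			// access the backing store through ptr
 *			vmw_user_object_unmap(uo);
 *		}
 *	}
 */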